/* rge.c revision 198157 */
1/*-
2 * Copyright (c) 2003-2009 RMI Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
14 *    may be used to endorse or promote products derived from this software
15 *    without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * RMI_BSD */
30
31#ifdef HAVE_KERNEL_OPTION_HEADERS
32#include "opt_device_polling.h"
33#endif
34
35#include <sys/types.h>
36#include <sys/endian.h>
37#include <sys/systm.h>
38#include <sys/sockio.h>
39#include <sys/param.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/proc.h>
43#include <sys/limits.h>
44#include <sys/bus.h>
45#include <sys/mbuf.h>
46#include <sys/malloc.h>
47#include <sys/kernel.h>
48#include <sys/module.h>
49#include <sys/socket.h>
50#define __RMAN_RESOURCE_VISIBLE
51#include <sys/rman.h>
52#include <sys/taskqueue.h>
53
54#include <net/if.h>
55#include <net/if_arp.h>
56#include <net/ethernet.h>
57#include <net/if_dl.h>
58#include <net/if_media.h>
59
60#include <net/bpf.h>
61
62#include <net/if_types.h>
63#include <net/if_vlan_var.h>
64
65#include <netinet/in_systm.h>
66#include <netinet/in.h>
67#include <netinet/ip.h>
68
69#include <vm/vm.h>
70#include <vm/pmap.h>
71
72#include <machine/reg.h>
73#include <machine/cpu.h>
74#include <machine/mips_opcode.h>
75#include <machine/asm.h>
76#include <machine/mips-exts.h>
77#include <machine/cpuregs.h>
78
79#include <machine/param.h>
80#include <machine/intr_machdep.h>
81#include <machine/clock.h>	/* for DELAY */
82#include <machine/bus.h>
83#include <machine/resource.h>
84#include <mips/xlr/interrupt.h>
85#include <mips/xlr/msgring.h>
86#include <mips/xlr/iomap.h>
87#include <mips/xlr/debug.h>
88#include <mips/xlr/pic.h>
89#include <mips/xlr/xlrconfig.h>
90#include <mips/xlr/shared_structs.h>
91#include <mips/xlr/board.h>
92
93#include <dev/rmi/xlr/atx_cpld.h>
94#include <dev/rmi/xlr/xgmac_mdio.h>
95#include <dev/rmi/xlr/rge.h>
96
97#include <dev/mii/mii.h>
98#include <dev/mii/miivar.h>
99#include "miidevs.h"
100#include <dev/mii/brgphyreg.h>
101#include "miibus_if.h"
102#include <sys/sysctl.h>
103
104/* #include "opt_rge.h" */
105
106MODULE_DEPEND(rge, ether, 1, 1, 1);
107MODULE_DEPEND(rge, miibus, 1, 1, 1);
108
109/* #define DEBUG */
110/*#define RX_COPY */
111
112#define RGE_TX_THRESHOLD 1024
113#define RGE_TX_Q_SIZE 1024
114
115#ifdef DEBUG
116#undef dbg_msg
117int		mac_debug = 1;
118#define dbg_msg(fmt, args...) \
119        do {\
120            if (mac_debug) {\
121                printf("[%s@%d|%s]: cpu_%d: " fmt, \
122                __FILE__, __LINE__, __FUNCTION__,  PCPU_GET(cpuid), ##args);\
123            }\
124        } while(0);
125
126#define DUMP_PACKETS
127#else
128#undef dbg_msg
129#define dbg_msg(fmt, args...)
130int		mac_debug = 0;
131#endif
132
133#define MAC_B2B_IPG             88
134
135/* frame sizes need to be cacheline aligned */
136#define MAX_FRAME_SIZE          1536
137#define MAX_FRAME_SIZE_JUMBO    9216
138
139#define MAC_SKB_BACK_PTR_SIZE   SMP_CACHE_BYTES
140#define MAC_PREPAD              0
141#define BYTE_OFFSET             2
142#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE+BYTE_OFFSET+MAC_PREPAD+MAC_SKB_BACK_PTR_SIZE+SMP_CACHE_BYTES)
143#define MAC_CRC_LEN             4
144#define MAX_NUM_MSGRNG_STN_CC   128
145
146#define MAX_NUM_DESC		1024
147#define MAX_SPILL_SIZE          (MAX_NUM_DESC + 128)
148
149#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16
150
151#define MAX_FRIN_SPILL          (MAX_SPILL_SIZE << 2)
152#define MAX_FROUT_SPILL         (MAX_SPILL_SIZE << 2)
153#define MAX_CLASS_0_SPILL       (MAX_SPILL_SIZE << 2)
154#define MAX_CLASS_1_SPILL       (MAX_SPILL_SIZE << 2)
155#define MAX_CLASS_2_SPILL       (MAX_SPILL_SIZE << 2)
156#define MAX_CLASS_3_SPILL       (MAX_SPILL_SIZE << 2)
157
158/*****************************************************************
159 * Phoenix Generic Mac driver
160 *****************************************************************/
161
extern uint32_t cpu_ltop_map[32];	/* logical-to-physical CPU id map (defined elsewhere) */

/* Link speeds the XLR MAC can be programmed for. */
typedef enum {
	xlr_mac_speed_10, xlr_mac_speed_100,
	xlr_mac_speed_1000, xlr_mac_speed_rsvd
}		xlr_mac_speed_t;

/* Duplex settings (auto means negotiated by the PHY). */
typedef enum {
	xlr_mac_duplex_auto, xlr_mac_duplex_half,
	xlr_mac_duplex_full
}		xlr_mac_duplex_t;

/* Link state as last observed by the driver. */
typedef enum {
	xlr_mac_link_down,
	xlr_mac_link_up,
}		xlr_mac_link_t;

/* Flow-control modes. */
typedef enum {
	xlr_mac_fc_auto, xlr_mac_fc_disabled, xlr_mac_fc_frame,
	xlr_mac_fc_collision, xlr_mac_fc_carrier
}		xlr_mac_fc_t;
182
183
/*
 * Per-interface traffic counters, updated with the atomic ldadd_wu
 * helper (see mac_stats_add) from the message-ring handlers.
 */
struct rge_softc_stats {
	unsigned long	rx_frames;	/* free-in descriptors consumed */
	unsigned long	tx_frames;	/* tx descriptors submitted */
	unsigned long	rx_packets;	/* packets received */
	unsigned long	rx_bytes;	/* bytes received */
	unsigned long	tx_packets;	/* packets transmitted */
	unsigned long	tx_bytes;	/* bytes transmitted */
};
192
/*
 * Per-port private driver state.  Embedded in struct rge_softc; the
 * spill-area pointers must stay first so the structure is cacheline
 * aligned when allocated (see comment below).
 */
struct driver_data {

	/*
	 * Let these be the first fields in this structure the structure is
	 * cacheline aligned when allocated in init_etherdev
	 */
	struct fr_desc *frin_spill;	/* free-in message spill area */
	struct fr_desc *frout_spill;	/* free-out message spill area */
	union rx_tx_desc *class_0_spill;	/* per-class rx/tx spill areas */
	union rx_tx_desc *class_1_spill;
	union rx_tx_desc *class_2_spill;
	union rx_tx_desc *class_3_spill;
	int		spill_configured;	/* nonzero once spill areas are programmed */

	struct rge_softc *sc;	/* pointer to freebsd device soft-pointer */
	struct rge_softc_stats stats;
	struct mtx	lock;

	/* Memory-mapped register windows for the MAC and its PHY blocks. */
	xlr_reg_t      *mmio;
	xlr_reg_t      *mii_mmio;
	xlr_reg_t      *pcs_mmio;
	xlr_reg_t      *serdes_mmio;

	int             txbucket;	/* message-ring bucket for tx */
	int             rfrbucket;	/* message-ring bucket for rx free descriptors */

	/* Cached PHY register state used to detect link changes. */
	int		phy_oldbmsr;
	int		phy_oldanlpar;
	int		phy_oldk1stsr;
	int		phy_oldlinkstat;
	unsigned char	phys_addr[2];

	xlr_mac_speed_t	speed;	/* current speed */
	xlr_mac_duplex_t duplex;/* current duplex */
	xlr_mac_link_t	link;	/* current link */
	xlr_mac_fc_t	flow_ctrl;	/* current flow control setting */
	int		advertising;

	int		id;		/* port id */
	int		type;		/* XLR_GMAC or XLR_XGMAC */
	int		mode;		/* port mode, e.g. XLR_PORT0_RGMII */
	int		instance;	/* MAC instance number */
	int		phy_addr;	/* PHY address on the MII bus */
	int		frin_to_be_sent[8];	/* per-cpu count of pending free-in descriptors */
	int		init_frin_desc;	/* initial free-in descriptors to post */
};
239
240/* static int mac_frin_to_be_sent_thr[8]; */
241
242enum {
243	PORT_TX,
244	PORT_TX_COMPLETE,
245	PORT_STARTQ,
246	PORT_STOPQ,
247	PORT_START_DEV_STATE,
248	PORT_STOP_DEV_STATE,
249};
250
251#ifdef ENABLED_DEBUG
252static int	port_counters[4][8] __aligned(XLR_CACHELINE_SIZE);
253#define port_inc_counter(port, counter) 	atomic_add_int(&port_counters[port][(counter)], 1)
254#define port_set_counter(port, counter, value) 	atomic_set_int(&port_counters[port][(counter)], (value))
255#else
256#define port_inc_counter(port, counter)				/*Nothing*/
257#define port_set_counter(port, counter, value)		/*Nothing*/
258#endif
259
260int xlr_rge_tx_prepend[MAXCPU];
261int xlr_rge_tx_done[MAXCPU];
262int xlr_rge_get_p2d_failed[MAXCPU];
263int xlr_rge_msg_snd_failed[MAXCPU];
264int xlr_rge_tx_ok_done[MAXCPU];
265int xlr_rge_rx_done[MAXCPU];
266int xlr_rge_repl_done[MAXCPU];
267
/*
 * Atomic fetch-and-add on a 32-bit counter using the XLR "ldaddwu"
 * custom instruction.  The opcode is emitted directly via .word
 * because the assembler does not know the mnemonic (see the commented
 * line).  Adds 'value' to *addr atomically and returns the value that
 * was previously stored there.
 * NOTE(review): addr is declared unsigned long but the instruction
 * operates on a 32-bit word -- assumes a 32-bit kernel; confirm.
 */
static __inline__ unsigned int
ldadd_wu(unsigned int value, unsigned long *addr)
{
	__asm__	 __volatile__( ".set push\n"
			       ".set noreorder\n"
			       "move $8, %2\n"
			       "move $9, %3\n"
			       /* "ldaddwu $8, $9\n" */
			       ".word 0x71280011\n"
			       "move %0, $8\n"
			       ".set pop\n"
			       : "=&r"(value), "+m"(*addr)
			       : "0"(value), "r" ((unsigned long)addr)
			       :  "$8", "$9");
	return value;
}
284
285/* #define mac_stats_add(x, val) ({(x) += (val);}) */
286#define mac_stats_add(x, val) ldadd_wu(val, &x)
287
/*
 * Per-device softc.  Combines the generic FreeBSD ifnet/miibus glue
 * with the XLR-specific per-port state in 'priv'.
 */
struct rge_softc {
	int		unit;		/* device unit number */
	int		irq;		/* assigned interrupt line */
	unsigned char	dev_addr[6];	/* station MAC address */
	unsigned long	base_addr;	/* register window base */
	unsigned long	mem_end;	/* register window end */
	struct ifnet   *rge_ifp;/* interface info */
	device_t	rge_dev;
	int		mtu;
	int		flags;
	struct driver_data priv;	/* XLR port-private state */
	struct mtx	rge_mtx;	/* softc lock (see RGE_LOCK macros) */
	device_t	rge_miibus;
	struct mii_data	rge_mii;/* MII/media information */
	bus_space_handle_t rge_bhandle;
	bus_space_tag_t	rge_btag;
	void           *rge_intrhand;
	struct resource	rge_irq;
	struct resource *rge_res;
	struct ifmedia	rge_ifmedia;	/* TBI media info */
	int		rge_if_flags;
	int		rge_link;	/* link state */
	int		rge_link_evt;	/* pending link event */
	struct callout	rge_stat_ch;
	/* Indirect operations, selected at attach time per MAC type. */
	void            (*xmit) (struct ifnet *);
	void            (*stop) (struct rge_softc *);
	int             (*ioctl) (struct ifnet *, u_long, caddr_t);
	struct rge_softc_stats *(*get_stats) (struct rge_softc *);
	int active;		/* nonzero while the port is up */
	int link_up;		/* cached link status for ifmedia reports */
};
319
320#define XLR_MAX_CORE 8
321#define RGE_LOCK_INIT(_sc, _name) \
322  mtx_init(&(_sc)->rge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
323#define RGE_LOCK(_sc)   mtx_lock(&(_sc)->rge_mtx)
324#define RGE_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->rge_mtx, MA_OWNED)
325#define RGE_UNLOCK(_sc)   mtx_unlock(&(_sc)->rge_mtx)
326#define RGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rge_mtx)
327
328#define XLR_MAX_MACS     8
329#define XLR_MAX_TX_FRAGS 14
330#define MAX_P2D_DESC_PER_PORT 512
331struct p2d_tx_desc {
332	uint64_t	frag   [XLR_MAX_TX_FRAGS + 2];
333};
334#define MAX_TX_RING_SIZE (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT * sizeof(struct p2d_tx_desc))
335
336struct rge_softc *dev_mac[XLR_MAX_MACS];
337static int	dev_mac_xgs0;
338static int	dev_mac_gmac0;
339
340static int	gmac_common_init_done;
341
342
343static int	rge_probe(device_t);
344static int	rge_attach(device_t);
345static int	rge_detach(device_t);
346static int	rge_suspend(device_t);
347static int	rge_resume(device_t);
348static void	rge_release_resources(struct rge_softc *);
349static void	rge_rx(struct rge_softc *, vm_paddr_t paddr, int);
350static void	rge_intr(void *);
351static void	rge_start_locked(struct ifnet *, int);
352static void	rge_start(struct ifnet *);
353static int	rge_ioctl(struct ifnet *, u_long, caddr_t);
354static void	rge_init(void *);
355static void	rge_stop(struct rge_softc *);
356static void	rge_watchdog(struct ifnet *);
357static void	rge_shutdown(device_t);
358static void	rge_reset(struct rge_softc *);
359
360static struct mbuf *get_mbuf(void);
361static void	free_buf(vm_paddr_t paddr);
362static void	*get_buf(void);
363
364static void	xlr_mac_get_hwaddr(struct rge_softc *);
365static void	xlr_mac_setup_hwaddr(struct driver_data *);
366static void	rmi_xlr_mac_set_enable(struct driver_data *priv, int flag);
367static void	rmi_xlr_xgmac_init(struct driver_data *priv);
368static void	rmi_xlr_gmac_init(struct driver_data *priv);
369static void	mac_common_init(void);
370static void	rge_mii_write(struct device *, int, int, int);
371static int	rge_mii_read(struct device *, int, int);
372static void	rmi_xlr_mac_mii_statchg(device_t);
373static int	rmi_xlr_mac_mediachange(struct ifnet *);
374static void	rmi_xlr_mac_mediastatus(struct ifnet *, struct ifmediareq *);
375static void	xlr_mac_set_rx_mode(struct rge_softc *sc);
376void
377rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
378			    int stid, struct msgrng_msg *msg,
379			    void *data);
380static void	mac_frin_replenish(void *);
381static int	rmi_xlr_mac_open(struct rge_softc *);
382static int	rmi_xlr_mac_close(struct rge_softc *);
383static int
384mac_xmit(struct mbuf *, struct rge_softc *,
385	 struct driver_data *, int, struct p2d_tx_desc *);
386static int	rmi_xlr_mac_xmit(struct mbuf *, struct rge_softc *, int, struct p2d_tx_desc *);
387static struct rge_softc_stats *rmi_xlr_mac_get_stats(struct rge_softc *sc);
388static void	rmi_xlr_mac_set_multicast_list(struct rge_softc *sc);
389static int	rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu);
390static int	rmi_xlr_mac_fill_rxfr(struct rge_softc *sc);
391static void	rmi_xlr_config_spill_area(struct driver_data *priv);
392static int	rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed);
393static int
394rmi_xlr_mac_set_duplex(struct driver_data *s,
395		       xlr_mac_duplex_t duplex, xlr_mac_fc_t fc);
396static void serdes_regs_init(struct driver_data *priv);
397static int rmi_xlr_gmac_reset(struct driver_data *priv);
398
399/*Statistics...*/
400static int get_p2d_desc_failed = 0;
401static int msg_snd_failed =0;
402
403SYSCTL_INT(_hw, OID_AUTO, get_p2d_failed, CTLFLAG_RW,
404		    &get_p2d_desc_failed, 0, "p2d desc failed");
405SYSCTL_INT(_hw, OID_AUTO, msg_snd_failed, CTLFLAG_RW,
406		    &msg_snd_failed, 0, "msg snd failed");
407
408struct callout	xlr_tx_stop_bkp;
409
410static device_method_t rge_methods[] = {
411	/* Device interface */
412	DEVMETHOD(device_probe, rge_probe),
413	DEVMETHOD(device_attach, rge_attach),
414	DEVMETHOD(device_detach, rge_detach),
415	DEVMETHOD(device_shutdown, rge_shutdown),
416	DEVMETHOD(device_suspend, rge_suspend),
417	DEVMETHOD(device_resume, rge_resume),
418
419	/* MII interface */
420	DEVMETHOD(miibus_readreg, rge_mii_read),
421	DEVMETHOD(miibus_writereg, rge_mii_write),
422	DEVMETHOD(miibus_statchg, rmi_xlr_mac_mii_statchg),
423	{0, 0}
424};
425
426static driver_t	rge_driver = {
427	"rge",
428	rge_methods,
429	sizeof(struct rge_softc)
430};
431
432static devclass_t rge_devclass;
433
434DRIVER_MODULE(rge, iodi, rge_driver, rge_devclass, 0, 0);
435DRIVER_MODULE(miibus, rge, miibus_driver, miibus_devclass, 0, 0);
436
437#ifndef __STR
438#define __STR(x) #x
439#endif
440#ifndef STR
441#define STR(x) __STR(x)
442#endif
443
444#define XKPHYS        0x8000000000000000
445
/*
 * Load a 32-bit word from a 40-bit physical address by forming an
 * XKPHYS virtual address with the given cache-coherency attribute
 * (cca).  KX is enabled around the access so the 64-bit address is
 * legal from a 32-bit kernel.  The physical address is masked to a
 * 4-byte-aligned 40-bit value.
 */
static __inline__ uint32_t
lw_40bit_phys(uint64_t phys, int cca)
{
	uint64_t	addr;
	uint32_t	value = 0;
	unsigned long	flags;

	addr = XKPHYS | ((uint64_t)cca << 59) | (phys & 0xfffffffffcULL);

	enable_KX(flags);
	__asm__		__volatile__(
			      		".set push\n"
			      		".set noreorder\n"
			      		".set mips64\n"
			      		"lw    %0, 0(%1) \n"
			      		".set pop\n"
			      :		"=r"         (value)
			      :		"r"          (addr) );

	disable_KX(flags);
	return value;
}
468
469
/*
 * Load a 64-bit doubleword from a 40-bit physical address via XKPHYS,
 * analogous to lw_40bit_phys above.
 * NOTE(review): the mask keeps only 4-byte alignment although 'ld'
 * requires 8-byte alignment -- callers must pass 8-byte-aligned
 * addresses; confirm.
 */
static __inline__ uint64_t
ld_40bit_phys(uint64_t phys, int cca)
{
	uint64_t	addr;
	uint64_t	value = 0;
	unsigned long	flags;


	addr =  XKPHYS | ((uint64_t)cca << 59) | (phys & 0xfffffffffcULL);
	enable_KX(flags);
	__asm__		__volatile__(
			      		".set push\n"
			      		".set noreorder\n"
			      		".set mips64\n"
			      		"ld    %0, 0(%1) \n"
			      		".set pop\n"
			      :		"=r"         (value)
			      :		"r"          (addr));

	disable_KX(flags);
	return value;
}
492
493
494void           *xlr_tx_ring_mem;
495
496struct tx_desc_node {
497	struct p2d_tx_desc *ptr;
498	TAILQ_ENTRY   (tx_desc_node) list;
499};
500#define XLR_MAX_TX_DESC_NODES (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT)
501struct tx_desc_node tx_desc_nodes[XLR_MAX_TX_DESC_NODES];
502static volatile int xlr_tot_avail_p2d[XLR_MAX_CORE];
503static int xlr_total_active_core = 0;
504
505/*
506 * This should contain the list of all free tx frag desc nodes pointing to tx
507 * p2d arrays
508 */
509static
510TAILQ_HEAD(, tx_desc_node) tx_frag_desc[XLR_MAX_CORE] =
511	{
512		TAILQ_HEAD_INITIALIZER(tx_frag_desc[0]),
513		TAILQ_HEAD_INITIALIZER(tx_frag_desc[1]),
514		TAILQ_HEAD_INITIALIZER(tx_frag_desc[2]),
515		TAILQ_HEAD_INITIALIZER(tx_frag_desc[3]),
516		TAILQ_HEAD_INITIALIZER(tx_frag_desc[4]),
517		TAILQ_HEAD_INITIALIZER(tx_frag_desc[5]),
518		TAILQ_HEAD_INITIALIZER(tx_frag_desc[6]),
519		TAILQ_HEAD_INITIALIZER(tx_frag_desc[7]),
520	};
521
522/* This contains a list of free tx frag node descriptors */
523static	TAILQ_HEAD(, tx_desc_node) free_tx_frag_desc[XLR_MAX_CORE] =
524	{
525		TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[0]),
526		TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[1]),
527		TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[2]),
528		TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[3]),
529		TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[4]),
530		TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[5]),
531		TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[6]),
532		TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[7]),
533	};
534
535static struct mtx tx_desc_lock[XLR_MAX_CORE];
536
537static inline void
538mac_make_desc_rfr(struct msgrng_msg *msg,
539		     vm_paddr_t addr)
540{
541	msg->msg0 = (uint64_t) addr & 0xffffffffe0ULL;
542	msg->msg1 = msg->msg2 = msg->msg3 = 0;
543}
544
545#define MAC_TX_DESC_ALIGNMENT (XLR_CACHELINE_SIZE - 1)
546
/*
 * Count the active cores (from the running cpumask) and split the
 * global pool of tx p2d descriptor nodes evenly among them.
 * Assumes 4 hardware threads per core (cpu/4 maps thread id to core).
 */
static void
init_p2d_allocation(void)
{
	int active_core[8]= {0};
	int i=0;
	uint32_t cpumask;
	int cpu;

	/* all CPUs: this one plus every other online CPU */
	cpumask = PCPU_GET(cpumask) | PCPU_GET(other_cpus);

	for (i = 0; i < 32; i++) {
		if (cpumask & (1 << i)) {
			cpu = cpu_ltop_map[i];
			if(!active_core[cpu/4]){
				active_core[cpu/4] = 1;
				xlr_total_active_core++;
			}
		}
	}
	/* give each active core an equal share of the descriptor nodes */
	for(i=0; i<XLR_MAX_CORE; i++){
		if(active_core[i])
			xlr_tot_avail_p2d[i] = XLR_MAX_TX_DESC_NODES/xlr_total_active_core;
	}
	printf("Total Active Core %d\n",xlr_total_active_core);
}
572
573
/*
 * Allocate the contiguous tx p2d descriptor ring and distribute the
 * descriptor nodes across the per-core free lists.  The ring is
 * accessed through KSEG0 (unmapped, cached) so the MAC sees physical
 * addresses that match what the CPU writes.
 */
static void
init_tx_ring(void)
{
	int		i;
	int j=0;
	struct tx_desc_node *start, *node;
	struct p2d_tx_desc *tx_desc;
	vm_paddr_t	paddr;
	vm_offset_t	unmapped_addr;

	for(i=0; i<XLR_MAX_CORE; i++)
		mtx_init(&tx_desc_lock[i], "xlr tx_desc", NULL, MTX_SPIN);

	start = &tx_desc_nodes[0];
	/* TODO: try to get this from KSEG0 */
	/* physically contiguous, below 256MB, cacheline aligned */
	xlr_tx_ring_mem = contigmalloc((MAX_TX_RING_SIZE + XLR_CACHELINE_SIZE),
				       M_DEVBUF, M_NOWAIT | M_ZERO, 0,
				       0x10000000, XLR_CACHELINE_SIZE, 0);

	if (xlr_tx_ring_mem == NULL) {
		panic("TX ring memory allocation failed");
	}
	paddr = vtophys((vm_offset_t) xlr_tx_ring_mem);

	unmapped_addr = MIPS_PHYS_TO_KSEG0(paddr);


	tx_desc = (struct p2d_tx_desc *)unmapped_addr;

	/*
	 * Hand out XLR_MAX_TX_DESC_NODES/active_core descriptors per
	 * core.  Note j is advanced after the insert, so each core's
	 * chunk is offset by one node -- harmless, the split stays even.
	 */
	for (i = 0; i < XLR_MAX_TX_DESC_NODES; i++) {
		node = start + i;
		node->ptr = tx_desc;
		tx_desc++;
		TAILQ_INSERT_HEAD(&tx_frag_desc[j], node, list);
		j = (i / (XLR_MAX_TX_DESC_NODES/xlr_total_active_core));
	}
}
611
/*
 * Take a p2d tx descriptor from the current core's free list.  The
 * emptied node is parked on free_tx_frag_desc so free_p2d_desc() can
 * reuse it later.  Returns NULL (and bumps the failure counter) when
 * the core has no descriptors left.
 */
static inline struct p2d_tx_desc *
get_p2d_desc(void)
{
	struct tx_desc_node *node;
	struct p2d_tx_desc *tx_desc = NULL;
	int cpu = xlr_cpu_id();

	mtx_lock_spin(&tx_desc_lock[cpu]);
	node = TAILQ_FIRST(&tx_frag_desc[cpu]);
	if (node) {
		xlr_tot_avail_p2d[cpu]--;
		TAILQ_REMOVE(&tx_frag_desc[cpu], node, list);
		tx_desc = node->ptr;
		TAILQ_INSERT_HEAD(&free_tx_frag_desc[cpu], node, list);
	}else{
		/*Increment p2d desc fail count*/
		get_p2d_desc_failed++;
	}
	mtx_unlock_spin(&tx_desc_lock[cpu]);
	return tx_desc;
}
/*
 * Return a p2d tx descriptor to the current core's pool: grab a spare
 * node from free_tx_frag_desc, point it at the descriptor, and put it
 * back on tx_frag_desc.  Panics if no spare node exists, which would
 * mean more frees than allocations on this core.
 */
static void
free_p2d_desc(struct p2d_tx_desc *tx_desc)
{
	struct tx_desc_node *node;
	int cpu = xlr_cpu_id();

	mtx_lock_spin(&tx_desc_lock[cpu]);
	node = TAILQ_FIRST(&free_tx_frag_desc[cpu]);
	KASSERT((node != NULL), ("Free TX frag node list is empty\n"));

	TAILQ_REMOVE(&free_tx_frag_desc[cpu], node, list);
	node->ptr = tx_desc;
	TAILQ_INSERT_HEAD(&tx_frag_desc[cpu], node, list);
	xlr_tot_avail_p2d[cpu]++;
	mtx_unlock_spin(&tx_desc_lock[cpu]);

}
650
/*
 * Walk an mbuf chain and fill a p2d tx descriptor with one physical
 * fragment entry per contiguous piece of data.  Each 64-bit entry
 * packs: bits 54-62 = free-back station id (127 = none), bits 40-53 =
 * length, bits 0-39 = physical address; bit 63 marks end-of-packet.
 *
 * An mbuf whose data crosses a page boundary into a non-adjacent
 * physical page is split into two fragments; a split across more than
 * two discontiguous pages is not handled and panics.
 *
 * The last real fragment gets EOP; one extra entry carries the
 * descriptor's own physical address with the caller's free-back
 * station id so tx-done returns the descriptor.  The two slots past
 * XLR_MAX_TX_FRAGS stash the descriptor and mbuf virtual pointers for
 * release_tx_desc().
 *
 * Fills *p2p_msg with the message-ring word describing the p2d list.
 * Returns 0 on success, 1 on failure (NULL descriptor or too many
 * fragments; the descriptor is freed in the latter case).
 */
static int
build_frag_list(struct mbuf *m_head, struct msgrng_msg *p2p_msg, struct p2d_tx_desc *tx_desc)
{
	struct mbuf    *m;
	vm_paddr_t	paddr;
	uint64_t	p2d_len;
	int		nfrag;
	vm_paddr_t	p1  , p2;
	uint32_t	len1  , len2;
	vm_offset_t	taddr;
	uint64_t	fr_stid;
	/* station id of this thread's free-back bucket */
	fr_stid = (xlr_cpu_id() << 3) + xlr_thr_id() + 4;

	if (tx_desc == NULL)
		return 1;

	nfrag = 0;
	for (m = m_head; m != NULL; m = m->m_next) {
		/* leave room for the descriptor-free entry appended below */
		if ((nfrag + 1) >= XLR_MAX_TX_FRAGS) {
			free_p2d_desc(tx_desc);
			return 1;
		}
		if (m->m_len != 0) {
			paddr = vtophys(mtod(m, vm_offset_t));
			p1 = paddr + m->m_len;
			p2 = vtophys(((vm_offset_t) m->m_data + m->m_len));
			if (p1 != p2) {
				/* data crosses into a discontiguous page: split */
				len1 = (uint32_t)
					(PAGE_SIZE - (paddr & PAGE_MASK));
				tx_desc->frag[nfrag] = (127ULL << 54) |
					((uint64_t) len1 << 40) | paddr;
				nfrag++;
				taddr = (vm_offset_t) m->m_data + len1;
				p2 = vtophys(taddr);
				len2 = m->m_len - len1;
				if (nfrag >= XLR_MAX_TX_FRAGS)
					panic("TX frags exceeded");

				tx_desc->frag[nfrag] = (127ULL << 54) |
					((uint64_t) len2 << 40) | p2;

				taddr += len2;
				p1 = vtophys(taddr);

				/* only a two-way split is supported */
				if ((p2 + len2) != p1) {
					printf("p1 = %llx p2 = %llx\n", p1, p2);
					printf("len1 = %x len2 = %x\n", len1,
					       len2);
					printf("m_data %p\n", m->m_data);
					DELAY(1000000);
					panic("Multiple Mbuf segment discontiguous\n");
				}
			} else {
				tx_desc->frag[nfrag] = (127ULL << 54) |
					((uint64_t) m->m_len << 40) | paddr;
			}
			nfrag++;
		}
	}
	/* set eop in the last tx p2d desc */
	tx_desc->frag[nfrag - 1] |= (1ULL << 63);
	paddr = vtophys((vm_offset_t) tx_desc);
	/* free-back entry: return this descriptor to fr_stid on tx done */
	tx_desc->frag[nfrag] = (1ULL << 63) | (fr_stid << 54) | paddr;
	nfrag++;
	/* stash virtual pointers for release_tx_desc() */
	tx_desc->frag[XLR_MAX_TX_FRAGS] = (uint64_t) (vm_offset_t) tx_desc;
	tx_desc->frag[XLR_MAX_TX_FRAGS + 1] = (uint64_t) (vm_offset_t) m_head;

	p2d_len = (nfrag * 8);
	/* message word: EOP | p2d-list flag | no-free station | len | addr */
	p2p_msg->msg0 = (1ULL << 63) | (1ULL << 62) | (127ULL << 54) |
		(p2d_len << 40) | paddr;

	return 0;
}
/*
 * Tx-done path: recover the p2d descriptor (and optionally the mbuf)
 * from a free-back message.  The virtual pointers were stashed at
 * frag[XLR_MAX_TX_FRAGS] and frag[XLR_MAX_TX_FRAGS + 1] by
 * build_frag_list(); they are read back through the physical address
 * with ld_40bit_phys since the message only carries the descriptor's
 * physical address.
 */
static void
release_tx_desc(struct msgrng_msg *msg, int rel_buf)
{
	vm_paddr_t	paddr = msg->msg0 & 0xffffffffffULL;
	uint64_t	temp;
	struct p2d_tx_desc *tx_desc;
	struct mbuf    *m;

	/* skip to the stashed descriptor pointer */
	paddr += (XLR_MAX_TX_FRAGS * sizeof(uint64_t));

	temp = ld_40bit_phys(paddr, 3);

	tx_desc = (struct p2d_tx_desc *)((vm_offset_t) temp);

	if (rel_buf) {
		/* next slot holds the mbuf pointer */
		paddr += sizeof(uint64_t);

		temp = ld_40bit_phys(paddr, 3);

		m = (struct mbuf *)((vm_offset_t) temp);
		m_freem(m);
	}

	free_p2d_desc(tx_desc);
}
749
750#ifdef RX_COPY
751#define RGE_MAX_NUM_DESC (6 * MAX_NUM_DESC)
752uint8_t        *rge_rx_buffers[RGE_MAX_NUM_DESC];
753static struct mtx rge_rx_mtx;
754int		g_rx_buf_head;
755
/*
 * RX_COPY mode: carve one big contiguous allocation into fixed-size
 * rx buffers.  Each buffer is prefixed by one cacheline holding a
 * back-pointer to its own start (mirroring the mbuf back-pointer
 * layout used by get_buf()); rge_rx_buffers[] records the usable,
 * cacheline-aligned data addresses.
 */
static void
init_rx_buf(void)
{
	int		i;
	uint8_t        *buf, *start;
	uint32_t	size  , *ptr;
	mtx_init(&rge_rx_mtx, "xlr rx_desc", NULL, MTX_SPIN);

	size = (RGE_MAX_NUM_DESC * (MAX_FRAME_SIZE + XLR_CACHELINE_SIZE));

	start = (uint8_t *) contigmalloc(size, M_DEVBUF, M_NOWAIT | M_ZERO,
				      0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
	if (start == NULL)
		panic("NO RX BUFFERS");
	buf = start;
	size = (MAX_FRAME_SIZE + XLR_CACHELINE_SIZE);
	for (i = 0; i < RGE_MAX_NUM_DESC; i++) {
		buf = start + (i * size);
		ptr = (uint32_t *) buf;
		*ptr = (uint32_t) buf;	/* back-pointer to buffer start */
		rge_rx_buffers[i] = buf + XLR_CACHELINE_SIZE;
	}
}
779
/*
 * RX_COPY mode: hand out the next preallocated rx buffer.  Returns
 * NULL once all RGE_MAX_NUM_DESC buffers are consumed.
 * NOTE(review): g_rx_buf_head only ever advances -- buffers are never
 * returned to this pool; confirm that is intended for RX_COPY mode.
 */
static void    *
get_rx_buf(void)
{
	void           *ptr = NULL;
	mtx_lock_spin(&rge_rx_mtx);
	if (g_rx_buf_head < RGE_MAX_NUM_DESC) {
		ptr = (void *)rge_rx_buffers[g_rx_buf_head];
		g_rx_buf_head++;
	}
	mtx_unlock_spin(&rge_rx_mtx);
	return ptr;
}
792#endif
793
794static struct mbuf    *
795get_mbuf(void)
796{
797	struct mbuf    *m_new = NULL;
798
799	if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
800		return NULL;
801
802	m_new->m_len = MCLBYTES;
803	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
804	return m_new;
805}
806
/*
 * Free an rx buffer given its data physical address.  get_buf() stored
 * a back-pointer to the owning mbuf one cacheline before the data, so
 * read it back through the physical address and m_freem() the mbuf.
 */
static void
free_buf(vm_paddr_t paddr)
{
	struct mbuf    *m;
	vm_offset_t	temp;

	/* back-pointer word lives XLR_CACHELINE_SIZE before the data */
	temp = lw_40bit_phys((paddr - XLR_CACHELINE_SIZE), 3);
	m = (struct mbuf *)temp;
	if (m != NULL)
		m_freem(m);
}
818
/*
 * Allocate an rx buffer for the MAC free-in ring.  In RX_COPY mode a
 * preallocated buffer is used; otherwise an mbuf cluster is aligned to
 * a cacheline boundary, a back-pointer to the mbuf is written into the
 * cacheline preceding the data (read back later by free_buf()), and a
 * pointer to the cacheline-aligned data area is returned.
 * NOTE(review): the alignment math casts m_data to unsigned int --
 * assumes 32-bit pointers; confirm for the target ABI.
 */
static void *
get_buf(void)
{
#ifdef RX_COPY
	return get_rx_buf();
#else
	struct mbuf    *m_new = NULL;
#ifdef INVARIANTS
	vm_paddr_t	temp1, temp2;
#endif
	unsigned int *md;

	m_new = get_mbuf();

	if (m_new == NULL)
		return NULL;

	/* advance m_data to the next cacheline boundary */
	m_adj(m_new, XLR_CACHELINE_SIZE - ((unsigned int)m_new->m_data & 0x1f));
	md = (unsigned int *)m_new->m_data;
	md[0] = (unsigned int)m_new;	/* Back Ptr */
	md[1] = 0xf00bad;	/* marker for debugging */
	m_adj(m_new, XLR_CACHELINE_SIZE);


	/* return (void *)m_new; */
#ifdef INVARIANTS
	/* the MAC DMAs physically: verify the buffer is phys-contiguous */
	temp1 = vtophys((vm_offset_t) m_new->m_data);
	temp2 = vtophys((vm_offset_t) m_new->m_data + 1536);
	if ((temp1 + 1536) != temp2)
		panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
#endif
	return (void *)m_new->m_data;
#endif
}
853
854/**********************************************************************
855 **********************************************************************/
/*
 * Enable (flag != 0) or disable (flag == 0) the MAC's tx and rx data
 * paths: tx control (enable + threshold), rx control (enable, plus
 * RGMII mode on port 0 when configured), and the MAC_CONFIG_1
 * tx/rx enables.  The disable path clears the same bits it set.
 */
static void
rmi_xlr_mac_set_enable(struct driver_data *priv, int flag)
{
	uint32_t	regval;
	int		tx_threshold = 1518;	/* full-frame tx threshold */

	if (flag) {
		regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
		regval |= (1 << O_TX_CONTROL__TxEnable) |
			(tx_threshold << O_TX_CONTROL__TxThreshold);

		xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
		regval |= 1 << O_RX_CONTROL__RxEnable;
		if (priv->mode == XLR_PORT0_RGMII)
			regval |= 1 << O_RX_CONTROL__RGMII;
		xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
		regval |= (O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
		xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
	} else {
		regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
		/* clears the threshold field along with the enable bit */
		regval &= ~((1 << O_TX_CONTROL__TxEnable) |
			    (tx_threshold << O_TX_CONTROL__TxThreshold));

		xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
		regval &= ~(1 << O_RX_CONTROL__RxEnable);
		xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
		regval &= ~(O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
		xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
	}
}
894
895/**********************************************************************
896 **********************************************************************/
/*
 * Post a free rx buffer back to the MAC's free-in bucket via the
 * message ring.  Spins until message_send() succeeds (it returns
 * nonzero while the ring is busy).  Always returns 0.
 */
static __inline__ int
xlr_mac_send_fr(struct driver_data *priv,
		vm_paddr_t addr, int len)
{
	int		stid = priv->rfrbucket;
	struct msgrng_msg msg;
	int vcpu = (xlr_cpu_id()<<2)+xlr_thr_id();	/* virtual cpu index for stats */

        mac_make_desc_rfr(&msg, addr);

	/* Send the packet to MAC */
	dbg_msg("mac_%d: Sending free packet %llx to stid %d\n",
	       priv->instance, addr, stid);
	if (priv->type == XLR_XGMAC) {
		while (message_send(1, MSGRNG_CODE_XGMAC, stid, &msg));
	} else {
		while (message_send(1, MSGRNG_CODE_MAC, stid, &msg));
		xlr_rge_repl_done[vcpu]++;
	}

	return 0;
}
919
920/**************************************************************/
921
922static void
923xgmac_mdio_setup(volatile unsigned int *_mmio)
924{
925	int		i;
926	uint32_t	rd_data;
927	for (i = 0; i < 4; i++) {
928		rd_data = xmdio_read(_mmio, 1, 0x8000 + i);
929		rd_data = rd_data & 0xffffdfff;	/* clear isolate bit */
930		xmdio_write(_mmio, 1, 0x8000 + i, rd_data);
931	}
932}
933
934/**********************************************************************
935 *  Init MII interface
936 *
937 *  Input parameters:
938 *  	   s - priv structure
939 ********************************************************************* */
940#define PHY_STATUS_RETRIES 25000
941
942static void
943rmi_xlr_mac_mii_init(struct driver_data *priv)
944{
945	xlr_reg_t      *mii_mmio = priv->mii_mmio;
946
947	/* use the lowest clock divisor - divisor 28 */
948	xlr_write_reg(mii_mmio, R_MII_MGMT_CONFIG, 0x07);
949}
950
951/**********************************************************************
952 *  Read a PHY register.
953 *
954 *  Input parameters:
955 *  	   s - priv structure
956 *  	   phyaddr - PHY's address
957 *  	   regidx = index of register to read
958 *
959 *  Return value:
960 *  	   value read, or 0 if an error occurred.
961 ********************************************************************* */
962
963static int
964rge_mii_read_internal(xlr_reg_t *mii_mmio, int phyaddr, int regidx)
965{
966	int		i = 0;
967
968	/* setup the phy reg to be used */
969	xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
970		      (phyaddr << 8) | (regidx << 0));
971	/* Issue the read command */
972	xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND,
973		      (1 << O_MII_MGMT_COMMAND__rstat));
974
975	/* poll for the read cycle to complete */
976	for (i = 0; i < PHY_STATUS_RETRIES; i++) {
977		if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
978			break;
979	}
980
981	/* clear the read cycle */
982	xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND, 0);
983
984	if (i == PHY_STATUS_RETRIES) {
985		return 0xffffffff;
986	}
987	/* Read the data back */
988	return xlr_read_reg(mii_mmio, R_MII_MGMT_STATUS);
989}
990
991static int
992rge_mii_read(struct device *dev, int phyaddr, int regidx)
993{
994	struct rge_softc *sc = device_get_softc(dev);
995	return rge_mii_read_internal(sc->priv.mii_mmio, phyaddr, regidx);
996}
997
998/**********************************************************************
999 *  Set MII hooks to newly selected media
1000 *
1001 *  Input parameters:
1002 *  	   ifp - Interface Pointer
1003 *
1004 *  Return value:
1005 *  	   nothing
1006 ********************************************************************* */
1007static int
1008rmi_xlr_mac_mediachange(struct ifnet *ifp)
1009{
1010	struct rge_softc *sc = ifp->if_softc;
1011
1012	if (ifp->if_flags & IFF_UP)
1013		mii_mediachg(&sc->rge_mii);
1014
1015	return 0;
1016}
1017
1018/**********************************************************************
1019 *  Get the current interface media status
1020 *
1021 *  Input parameters:
1022 *  	   ifp  - Interface Pointer
1023 *  	   ifmr - Interface media request ptr
1024 *
1025 *  Return value:
1026 *  	   nothing
1027 ********************************************************************* */
1028static void
1029rmi_xlr_mac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1030{
1031	struct rge_softc *sc = ifp->if_softc;
1032
1033	/*Check whether this is interface is active or not.*/
1034	ifmr->ifm_status = IFM_AVALID;
1035	if(sc->link_up){
1036		ifmr->ifm_status |= IFM_ACTIVE;
1037	}else{
1038		ifmr->ifm_active = IFM_ETHER;
1039	}
1040}
1041
1042/**********************************************************************
1043 *  Write a value to a PHY register.
1044 *
1045 *  Input parameters:
1046 *  	   s - priv structure
1047 *  	   phyaddr - PHY to use
1048 *  	   regidx - register within the PHY
1049 *  	   regval - data to write to register
1050 *
1051 *  Return value:
1052 *  	   nothing
1053 ********************************************************************* */
/*
 * Write one PHY register through the MII management block: program the
 * phy/register address, write the data (which starts the cycle), and
 * poll the busy indicator until the cycle completes or the retry
 * budget is exhausted.  Timeouts are silently ignored.
 */
static void
rge_mii_write_internal(xlr_reg_t *mii_mmio, int phyaddr, int regidx, int regval)
{
	int		i = 0;

	xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
		      (phyaddr << 8) | (regidx << 0));

	/* Write the data which starts the write cycle */
	xlr_write_reg(mii_mmio, R_MII_MGMT_WRITE_DATA, regval);

	/* poll for the write cycle to complete */
	for (i = 0; i < PHY_STATUS_RETRIES; i++) {
		if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
			break;
	}

	return;
}
1073
1074static void
1075rge_mii_write(struct device *dev, int phyaddr, int regidx, int regval)
1076{
1077	struct rge_softc *sc = device_get_softc(dev);
1078
1079	rge_mii_write_internal(sc->priv.mii_mmio, phyaddr, regidx, regval);
1080}
1081
/*
 * MII status-change callback.  Intentionally empty: speed/duplex
 * reprogramming is done directly in rmi_xlr_gmac_config_speed()
 * rather than from this hook.
 */
static void
rmi_xlr_mac_mii_statchg(device_t dev)
{
}
1086
1087static void
1088serdes_regs_init(struct driver_data *priv)
1089{
1090	xlr_reg_t *mmio_gpio = (xlr_reg_t *)(xlr_io_base + XLR_IO_GPIO_OFFSET);
1091	int i;
1092
1093	/* Initialize SERDES CONTROL Registers */
1094	rge_mii_write_internal(priv->serdes_mmio, 26, 0, 0x6DB0);
1095	rge_mii_write_internal(priv->serdes_mmio, 26, 1, 0xFFFF);
1096	rge_mii_write_internal(priv->serdes_mmio, 26, 2, 0xB6D0);
1097	rge_mii_write_internal(priv->serdes_mmio, 26, 3, 0x00FF);
1098	rge_mii_write_internal(priv->serdes_mmio, 26, 4, 0x0000);
1099	rge_mii_write_internal(priv->serdes_mmio, 26, 5, 0x0000);
1100	rge_mii_write_internal(priv->serdes_mmio, 26, 6, 0x0005);
1101	rge_mii_write_internal(priv->serdes_mmio, 26, 7, 0x0001);
1102	rge_mii_write_internal(priv->serdes_mmio, 26, 8, 0x0000);
1103	rge_mii_write_internal(priv->serdes_mmio, 26, 9, 0x0000);
1104	rge_mii_write_internal(priv->serdes_mmio, 26,10, 0x0000);
1105
1106	/*
1107	 * For loop delay and GPIO programming crud from Linux driver,
1108	 */
1109	for(i=0;i<10000000;i++){}
1110	mmio_gpio[0x20] = 0x7e6802;
1111	mmio_gpio[0x10] = 0x7104;
1112	for(i=0;i<100000000;i++){}
1113	return;
1114}
1115
1116static void serdes_autoconfig(struct driver_data *priv)
1117{
1118    int delay = 100000;
1119
1120    /* Enable Auto negotiation in the PCS Layer*/
1121    rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x1000);
1122    DELAY(delay);
1123    rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x0200);
1124    DELAY(delay);
1125
1126    rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x1000);
1127    DELAY(delay);
1128    rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x0200);
1129    DELAY(delay);
1130
1131    rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x1000);
1132    DELAY(delay);
1133    rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x0200);
1134    DELAY(delay);
1135
1136    rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x1000);
1137    DELAY(delay);
1138    rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x0200);
1139    DELAY(delay);
1140
1141}
1142
1143/*****************************************************************
1144 * Initialize GMAC
1145 *****************************************************************/
1146static void
1147rmi_xlr_config_pde(struct driver_data *priv)
1148{
1149	int		i = 0,	cpu = 0, bucket = 0;
1150	uint64_t	bucket_map = 0;
1151	/* uint32_t desc_pack_ctrl = 0; */
1152	uint32_t	cpumask;
1153
1154	cpumask = PCPU_GET(cpumask) | PCPU_GET(other_cpus);
1155
1156	for (i = 0; i < 32; i++) {
1157		if (cpumask & (1 << i)) {
1158			cpu = cpu_ltop_map[i];
1159			bucket = ((cpu >> 2) << 3);//| (cpu & 0x03);
1160			bucket_map |= (1ULL << bucket);
1161			dbg_msg("i=%d, cpu=%d, bucket = %d, bucket_map=%llx\n",
1162				i, cpu, bucket, bucket_map);
1163		}
1164	}
1165
1166	/* bucket_map = 0x1; */
1167	xlr_write_reg(priv->mmio, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
1168	xlr_write_reg(priv->mmio, R_PDE_CLASS_0 + 1,
1169		      ((bucket_map >> 32) & 0xffffffff));
1170
1171	xlr_write_reg(priv->mmio, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
1172	xlr_write_reg(priv->mmio, R_PDE_CLASS_1 + 1,
1173		      ((bucket_map >> 32) & 0xffffffff));
1174
1175	xlr_write_reg(priv->mmio, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
1176	xlr_write_reg(priv->mmio, R_PDE_CLASS_2 + 1,
1177		      ((bucket_map >> 32) & 0xffffffff));
1178
1179	xlr_write_reg(priv->mmio, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
1180	xlr_write_reg(priv->mmio, R_PDE_CLASS_3 + 1,
1181		      ((bucket_map >> 32) & 0xffffffff));
1182}
1183
/*
 * Configure the hardware packet parser.  The parser "extract" feeds
 * the classifier; here it is set to pull basic IPv4 header fields.
 */
static void
rmi_xlr_config_parser(struct driver_data *priv)
{
	/*
	 * Mark it as no classification.  The parser extract is guaranteed
	 * to be zero with no classification.
	 */
	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x00);

	/*
	 * NOTE(review): this immediately overwrites the 0x00 written
	 * above -- presumably the first write is a required reset step;
	 * confirm against the XLR register documentation.
	 */
	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x01);

	/* configure the parser : L2 Type is configured in the bootloader */
	/* extract IP: src, dest protocol */
	xlr_write_reg(priv->mmio, R_L3CTABLE,
		      (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
		      (0x0800 << 0));
	xlr_write_reg(priv->mmio, R_L3CTABLE + 1,
		      (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));

}
1204
1205static void
1206rmi_xlr_config_classifier(struct driver_data *priv)
1207{
1208	int		i = 0;
1209
1210	if (priv->type == XLR_XGMAC) {
1211		/* xgmac translation table doesn't have sane values on reset */
1212		for (i = 0; i < 64; i++)
1213			xlr_write_reg(priv->mmio, R_TRANSLATETABLE + i, 0x0);
1214
1215		/*
1216		 * use upper 7 bits of the parser extract to index the
1217		 * translate table
1218		 */
1219		xlr_write_reg(priv->mmio, R_PARSERCONFIGREG, 0x0);
1220	}
1221}
1222
/* Speed-select values written to R_INTERFACE_CONTROL in SGMII mode. */
enum {
        SGMII_SPEED_10   = 0x00000000,
        SGMII_SPEED_100  = 0x02000000,
        SGMII_SPEED_1000 = 0x04000000,
};
1228
/*
 * Read the negotiated speed and link state from the PHY and program
 * the MAC/interface registers to match, mirroring the result into the
 * mii/ifmedia structures so userland reports the correct media.
 * Unknown speed codes fall back to 100 Mbps.
 */
static void
rmi_xlr_gmac_config_speed(struct driver_data *priv)
{
	int phy_addr = priv->phy_addr;
	xlr_reg_t  *mmio = priv->mmio;
	struct rge_softc *sc = priv->sc;

	/*
	 * NOTE(review): PHY register 28 (speed, bits 4:3 after the shift
	 * below) is vendor-specific; reg 1 bit 2 is the standard link
	 * status bit.  Confirm against the board's PHY datasheet.
	 */
	priv->speed = rge_mii_read_internal(priv->mii_mmio, phy_addr, 28);
	priv->link = rge_mii_read_internal(priv->mii_mmio, phy_addr, 1) & 0x4;
	priv->speed = (priv->speed >> 3) & 0x03;

	if (priv->speed == xlr_mac_speed_10) {
		/* SGMII needs an explicit interface speed; RGMII does not. */
		if (priv->mode != XLR_RGMII)
			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_10);
		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
		xlr_write_reg(mmio, R_CORECONTROL, 0x02);
		printf("%s: [10Mbps]\n", device_get_nameunit(sc->rge_dev));
		sc->rge_mii.mii_media.ifm_media =          IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
		sc->rge_mii.mii_media_active =             IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
	} else if (priv->speed == xlr_mac_speed_100) {
		if (priv->mode != XLR_RGMII)
			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
		xlr_write_reg(mmio, R_CORECONTROL, 0x01);
		printf("%s: [100Mbps]\n", device_get_nameunit(sc->rge_dev));
		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
	} else {
		if (priv->speed != xlr_mac_speed_1000) {
			/* Unrecognized speed code: assume 100 Mbps. */
			if (priv->mode != XLR_RGMII)
				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
			printf("PHY reported unknown MAC speed, defaulting to 100Mbps\n");
			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
			xlr_write_reg(mmio, R_CORECONTROL, 0x01);
			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		} else {
			if (priv->mode != XLR_RGMII)
				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_1000);
			/* 0x7237 (vs 0x7137 above) selects GigE framing. */
			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7237);
			xlr_write_reg(mmio, R_CORECONTROL, 0x00);
			printf("%s: [1000Mbps]\n", device_get_nameunit(sc->rge_dev));
			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
		}
	}

	/* Finally reflect the link state into the softc/ifmedia. */
	if (!priv->link) {
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER;
		sc->link_up = 0;
	} else {
		sc->link_up = 1;
	}
}
1287
1288/*****************************************************************
1289 * Initialize XGMAC
1290 *****************************************************************/
1291static void
1292rmi_xlr_xgmac_init(struct driver_data *priv)
1293{
1294	int		i = 0;
1295	xlr_reg_t      *mmio = priv->mmio;
1296	int		id = priv->instance;
1297	struct rge_softc *sc = priv->sc;
1298	volatile unsigned short *cpld;
1299
1300	cpld = (volatile unsigned short *)0xBD840000;
1301
1302	xlr_write_reg(priv->mmio, R_DESC_PACK_CTRL,
1303	     (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize) | (4 << 20));
1304	xlr_write_reg(priv->mmio, R_BYTEOFFSET0, BYTE_OFFSET);
1305	rmi_xlr_config_pde(priv);
1306	rmi_xlr_config_parser(priv);
1307	rmi_xlr_config_classifier(priv);
1308
1309	xlr_write_reg(priv->mmio, R_MSG_TX_THRESHOLD, 1);
1310
1311	/* configure the XGMAC Registers */
1312	xlr_write_reg(mmio, R_XGMAC_CONFIG_1, 0x50000026);
1313
1314	/* configure the XGMAC_GLUE Registers */
1315	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
1316	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
1317	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
1318	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
1319	xlr_write_reg(mmio, R_STATCTRL, 0x04);
1320	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
1321
1322	xlr_write_reg(mmio, R_XGMACPADCALIBRATION, 0x030);
1323	xlr_write_reg(mmio, R_EGRESSFIFOCARVINGSLOTS, 0x0f);
1324	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
1325	xlr_write_reg(mmio, R_XGMAC_MIIM_CONFIG, 0x3e);
1326
1327	/*
1328	 * take XGMII phy out of reset
1329	 */
1330	/*
1331	 * we are pulling everything out of reset because writing a 0 would
1332	 * reset other devices on the chip
1333	 */
1334	cpld[ATX_CPLD_RESET_1] = 0xffff;
1335	cpld[ATX_CPLD_MISC_CTRL] = 0xffff;
1336	cpld[ATX_CPLD_RESET_2] = 0xffff;
1337
1338	xgmac_mdio_setup(mmio);
1339
1340	rmi_xlr_config_spill_area(priv);
1341
1342	if (id == 0) {
1343		for (i = 0; i < 16; i++) {
1344			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
1345				      bucket_sizes.
1346				      bucket[MSGRNG_STNID_XGS0_TX + i]);
1347		}
1348
1349		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
1350			      bucket_sizes.bucket[MSGRNG_STNID_XMAC0JFR]);
1351		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
1352			      bucket_sizes.bucket[MSGRNG_STNID_XMAC0RFR]);
1353
1354		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1355			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
1356				      cc_table_xgs_0.
1357				      counters[i >> 3][i & 0x07]);
1358		}
1359	} else if (id == 1) {
1360		for (i = 0; i < 16; i++) {
1361			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
1362				      bucket_sizes.
1363				      bucket[MSGRNG_STNID_XGS1_TX + i]);
1364		}
1365
1366		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
1367			      bucket_sizes.bucket[MSGRNG_STNID_XMAC1JFR]);
1368		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
1369			      bucket_sizes.bucket[MSGRNG_STNID_XMAC1RFR]);
1370
1371		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1372			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
1373				      cc_table_xgs_1.
1374				      counters[i >> 3][i & 0x07]);
1375		}
1376	}
1377	sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1378	sc->rge_mii.mii_media.ifm_media |= (IFM_AVALID | IFM_ACTIVE);
1379	sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1380	sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1381	sc->rge_mii.mii_media.ifm_cur->ifm_media |= (IFM_AVALID | IFM_ACTIVE);
1382
1383	priv->init_frin_desc = 1;
1384}
1385
1386/*******************************************************
1387 * Initialization gmac
1388 *******************************************************/
1389static int
1390rmi_xlr_gmac_reset(struct driver_data *priv)
1391{
1392	volatile uint32_t val;
1393	xlr_reg_t *mmio = priv->mmio;
1394	int i, maxloops = 100;
1395
1396        /* Disable MAC RX */
1397        val = xlr_read_reg(mmio, R_MAC_CONFIG_1);
1398        val &= ~0x4;
1399        xlr_write_reg(mmio, R_MAC_CONFIG_1, val);
1400
1401        /* Disable Core RX */
1402        val = xlr_read_reg(mmio, R_RX_CONTROL);
1403        val &= ~0x1;
1404        xlr_write_reg(mmio, R_RX_CONTROL, val);
1405
1406        /* wait for rx to halt */
1407        for (i=0; i<maxloops; i++) {
1408            val = xlr_read_reg(mmio, R_RX_CONTROL);
1409            if(val & 0x2)
1410                break;
1411            DELAY(1000);
1412        }
1413	if (i == maxloops)
1414		return -1;
1415
1416        /* Issue a soft reset */
1417        val = xlr_read_reg(mmio, R_RX_CONTROL);
1418        val |= 0x4;
1419        xlr_write_reg(mmio, R_RX_CONTROL, val);
1420
1421        /* wait for reset to complete */
1422        for (i=0; i<maxloops; i++) {
1423            val = xlr_read_reg(mmio, R_RX_CONTROL);
1424            if(val & 0x8)
1425                break;
1426            DELAY(1000);
1427        }
1428	if (i == maxloops)
1429		return -1;
1430
1431        /* Clear the soft reset bit */
1432        val = xlr_read_reg(mmio, R_RX_CONTROL);
1433        val &= ~0x4;
1434        xlr_write_reg(mmio, R_RX_CONTROL, val);
1435	return 0;
1436}
1437
/*
 * One-time init of a 1G (GMAC) port: spill areas, descriptor packing,
 * PDE/parser/classifier, MII/SERDES bring-up, speed configuration,
 * glue registers, and the message-ring bucket/credit tables for this
 * port's block.  Register write order follows the reference driver.
 */
static void
rmi_xlr_gmac_init(struct driver_data *priv)
{
	int		i = 0;
	xlr_reg_t      *mmio = priv->mmio;
	int		id = priv->instance;
	struct stn_cc *gmac_cc_config;
	uint32_t	value = 0;
	/* Four gmac ports per block; 'port' indexes within the block. */
	int blk = id/4, port = id % 4;

	rmi_xlr_mac_set_enable(priv, 0);

	rmi_xlr_config_spill_area(priv);

	xlr_write_reg(mmio, R_DESC_PACK_CTRL,
		      (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset) |
		      (1 << O_DESC_PACK_CTRL__MaxEntry) |
		      (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));

	rmi_xlr_config_pde(priv);
	rmi_xlr_config_parser(priv);
	rmi_xlr_config_classifier(priv);

	xlr_write_reg(mmio, R_MSG_TX_THRESHOLD, 3);
	xlr_write_reg(mmio, R_MAC_CONFIG_1, 0x35);
	xlr_write_reg(mmio, R_RX_CONTROL, (0x7<<6));

	if(priv->mode == XLR_PORT0_RGMII) {
		printf("Port 0 set in RGMII mode\n");
		value = xlr_read_reg(mmio, R_RX_CONTROL);
		value |= 1 << O_RX_CONTROL__RGMII;
		xlr_write_reg(mmio, R_RX_CONTROL, value);
	}

	rmi_xlr_mac_mii_init(priv);


#if 0
	priv->advertising = ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half |
		ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half |
		ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		ADVERTISED_MII;
#endif

	/*
	 * Enable all MDIO interrupts in the phy.  RX_ER bit seems to get
	 * set about every 1 sec in GigE mode, ignore it for now...
	 */
	rge_mii_write_internal(priv->mii_mmio, priv->phy_addr, 25, 0xfffffffe);

	/* SGMII ports need SERDES bring-up; RGMII ports do not. */
	if (priv->mode != XLR_RGMII){
		serdes_regs_init(priv);
		serdes_autoconfig(priv);
	}

	rmi_xlr_gmac_config_speed(priv);

	/* Set the back-to-back inter-packet gap, keeping other IPG bits. */
	value = xlr_read_reg(mmio, R_IPG_IFG);
	xlr_write_reg(mmio, R_IPG_IFG, ((value & ~0x7f) | MAC_B2B_IPG));
	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
	xlr_write_reg(mmio, R_STATCTRL, 0x04);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
	xlr_write_reg(mmio, R_INTMASK, 0);
	xlr_write_reg(mmio, R_FREEQCARVE, 0);

	/* Message-ring bucket sizes for tx and the jumbo/regular free-in. */
	xlr_write_reg(mmio, R_GMAC_TX0_BUCKET_SIZE + port,
		      xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
	xlr_write_reg(mmio, R_GMAC_JFR0_BUCKET_SIZE,
		      xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
	xlr_write_reg(mmio, R_GMAC_RFR0_BUCKET_SIZE,
		     xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
	xlr_write_reg(mmio, R_GMAC_JFR1_BUCKET_SIZE,
		      xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
	xlr_write_reg(mmio, R_GMAC_RFR1_BUCKET_SIZE,
		      xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);

	dbg_msg("Programming credit counter %d : %d -> %d\n", blk, R_GMAC_TX0_BUCKET_SIZE + port,
	       xlr_board_info.bucket_sizes->bucket[priv->txbucket]);

	/* Credit counters come from the per-block board configuration. */
	gmac_cc_config = xlr_board_info.gmac_block[blk].credit_config;
	for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
		xlr_write_reg(mmio, R_CC_CPU0_0 + i,
			      gmac_cc_config->counters[i >> 3][i & 0x07]);
		dbg_msg("%d: %d -> %d\n", priv->instance,
		       R_CC_CPU0_0 + i, gmac_cc_config->counters[i >> 3][i & 0x07]);
	}
	/* Ask the rx path to seed free-in descriptors on first use. */
	priv->init_frin_desc = 1;
}
1529
1530/**********************************************************************
1531 * Set promiscuous mode
1532 **********************************************************************/
1533static void
1534xlr_mac_set_rx_mode(struct rge_softc *sc)
1535{
1536	struct driver_data *priv = &(sc->priv);
1537	uint32_t	regval;
1538
1539	regval = xlr_read_reg(priv->mmio, R_MAC_FILTER_CONFIG);
1540
1541	if (sc->flags & IFF_PROMISC) {
1542		regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1543			(1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1544			(1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1545			(1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
1546	} else {
1547		regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1548			    (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
1549	}
1550
1551	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG, regval);
1552}
1553
1554/**********************************************************************
1555 *  Configure LAN speed for the specified MAC.
1556 ********************************************************************* */
1557static int
1558rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed)
1559{
1560	return 0;
1561}
1562
1563/**********************************************************************
1564 *  Set Ethernet duplex and flow control options for this MAC
1565 ********************************************************************* */
1566static int
1567rmi_xlr_mac_set_duplex(struct driver_data *s,
1568		       xlr_mac_duplex_t duplex, xlr_mac_fc_t fc)
1569{
1570	return 0;
1571}
1572
1573/*****************************************************************
1574 * Kernel Net Stack <-> MAC Driver Interface
1575 *****************************************************************/
1576/**********************************************************************
1577 **********************************************************************/
1578#define MAC_TX_FAIL 2
1579#define MAC_TX_PASS 0
1580#define MAC_TX_RETRY 1
1581
/*
 * Load a 4-word message into the cpu's message-ring transmit registers
 * and send it to station 'stid', spinning until the send succeeds.
 *
 * dest encoding: bits 16+ hold size-1, bits 8-15 the code, low bits
 * the station id.  The loop retries while status bits 0x6 are set
 * (NOTE(review): presumably send-pending/failed -- confirm against
 * the XLR message-ring documentation).
 */
static __inline__ void
message_send_block(unsigned int size, unsigned int code,
		   unsigned int stid, struct msgrng_msg *msg)
{
	unsigned int	dest = 0;
	unsigned long long status = 0;

	msgrng_load_tx_msg0(msg->msg0);
	msgrng_load_tx_msg1(msg->msg1);
	msgrng_load_tx_msg2(msg->msg2);
	msgrng_load_tx_msg3(msg->msg3);

	dest = ((size - 1) << 16) | (code << 8) | (stid);

	do {
		msgrng_send(dest);
		status = msgrng_read_status();
	} while (status & 0x6);

}
1602
/* NOTE(review): appears unreferenced in this file -- confirm before removal. */
int		xlr_dev_queue_xmit_hack = 0;
1604
1605static int
1606mac_xmit(struct mbuf *m, struct rge_softc *sc,
1607	 struct driver_data *priv, int len, struct p2d_tx_desc *tx_desc)
1608{
1609	struct msgrng_msg msg;
1610	int		stid = priv->txbucket;
1611	uint32_t	tx_cycles = 0;
1612	unsigned long	mflags = 0;
1613	int vcpu = PCPU_GET(cpuid);
1614	int rv;
1615
1616	tx_cycles = mips_rd_count();
1617
1618	if (build_frag_list(m, &msg, tx_desc) != 0)
1619		return MAC_TX_FAIL;
1620
1621	else {
1622		msgrng_access_enable(mflags);
1623		if ((rv = message_send_retry(1, MSGRNG_CODE_MAC, stid, &msg)) != 0) {
1624			msg_snd_failed++;
1625			msgrng_access_disable(mflags);
1626			release_tx_desc(&msg, 0);
1627			xlr_rge_msg_snd_failed[vcpu]++;
1628			dbg_msg("Failed packet to cpu %d, rv = %d, stid %d, msg0=%llx\n",
1629				      vcpu, rv, stid, msg.msg0);
1630			return MAC_TX_FAIL;
1631		}
1632		msgrng_access_disable(mflags);
1633		port_inc_counter(priv->instance, PORT_TX);
1634	}
1635
1636	/* Send the packet to MAC */
1637	dbg_msg("Sent tx packet to stid %d, msg0=%llx, msg1=%llx \n", stid, msg.msg0, msg.msg1);
1638#ifdef DUMP_PACKETS
1639	{
1640		int		i = 0;
1641		unsigned char  *buf = (char *)m->m_data;
1642		printf("Tx Packet: length=%d\n", len);
1643		for (i = 0; i < 64; i++) {
1644			if (i && (i % 16) == 0)
1645				printf("\n");
1646			printf("%02x ", buf[i]);
1647		}
1648		printf("\n");
1649	}
1650#endif
1651	xlr_inc_counter(NETIF_TX);
1652	return MAC_TX_PASS;
1653}
1654
1655static int
1656rmi_xlr_mac_xmit(struct mbuf *m, struct rge_softc *sc, int len, struct p2d_tx_desc *tx_desc)
1657{
1658	struct driver_data *priv = &(sc->priv);
1659	int		ret = -ENOSPC;
1660
1661	dbg_msg("IN\n");
1662
1663	xlr_inc_counter(NETIF_STACK_TX);
1664
1665retry:
1666	ret = mac_xmit(m, sc, priv, len, tx_desc);
1667
1668	if (ret == MAC_TX_RETRY)
1669		goto retry;
1670
1671	dbg_msg("OUT, ret = %d\n", ret);
1672	if (ret == MAC_TX_FAIL) {
1673		/* FULL */
1674		dbg_msg("Msg Ring Full. Stopping upper layer Q\n");
1675		port_inc_counter(priv->instance, PORT_STOPQ);
1676	}
1677	return ret;
1678}
1679
/*
 * Replenish the MACs' free-in descriptor buckets: for every attached
 * mac with a positive per-cpu frin_to_be_sent count, allocate receive
 * buffers and hand them to the hardware until every mac's count for
 * this cpu is drained.  No-op when RX_COPY is configured.
 */
static void
mac_frin_replenish(void *args /* ignored */ )
{
#ifdef RX_COPY
	return;
#else
	int		cpu = xlr_cpu_id();
	int		done = 0;
	int		i = 0;

	xlr_inc_counter(REPLENISH_ENTER);
	/*
	 * xlr_set_counter(REPLENISH_ENTER_COUNT,
	 * atomic_read(frin_to_be_sent));
	 */
	xlr_set_counter(REPLENISH_CPU, PCPU_GET(cpuid));

	/* Sweep all macs repeatedly until a full pass finds nothing to do. */
	for (;;) {

		done = 0;

		for (i = 0; i < XLR_MAX_MACS; i++) {
			/* int offset = 0; */
			unsigned long	msgrng_flags;
			void           *m;
			uint32_t	cycles;
			struct rge_softc *sc;
			struct driver_data *priv;
			int		frin_to_be_sent;

			sc = dev_mac[i];
			if (!sc)
				goto skip;

			priv = &(sc->priv);
			frin_to_be_sent = priv->frin_to_be_sent[cpu];

			/* if (atomic_read(frin_to_be_sent) < 0) */
			if (frin_to_be_sent < 0) {
				panic ("BUG?: [%s]: gmac_%d illegal value for frin_to_be_sent=%d\n",
					 __FUNCTION__, i,
					 frin_to_be_sent);
			}
			/* if (!atomic_read(frin_to_be_sent)) */
			if (!frin_to_be_sent)
				goto skip;

			cycles = mips_rd_count();
			{
				m = get_buf();
				if (!m) {
					device_printf(sc->rge_dev, "No buffer\n");
					goto skip;
				}
			}
			xlr_inc_counter(REPLENISH_FRIN);
			/* Hand the new buffer's physical address to the MAC. */
			msgrng_access_enable(msgrng_flags);
			if (xlr_mac_send_fr(priv, vtophys(m), MAX_FRAME_SIZE)) {
				free_buf(vtophys(m));
				printf("[%s]: rx free message_send failed!\n", __FUNCTION__);
				msgrng_access_disable(msgrng_flags);
				break;
			}
			msgrng_access_disable(msgrng_flags);
			xlr_set_counter(REPLENISH_CYCLES,
					(read_c0_count() - cycles));
			atomic_subtract_int((&priv->frin_to_be_sent[cpu]), 1);

			/* This mac may need more buffers: keep it in play. */
			continue;
	skip:
			done++;
		}
		/* Every mac was skipped this pass: nothing left to do. */
		if (done == XLR_MAX_MACS)
			break;
	}
#endif
}
1757
/* Nonzero while rge_tx_bkp_func() is running; set/cleared atomically. */
static volatile uint32_t g_tx_frm_tx_ok;
1759
1760static void
1761rge_tx_bkp_func(void *arg, int npending)
1762{
1763	int i=0;
1764	for(i=0; i<xlr_board_info.gmacports; i++){
1765		if (!dev_mac[i] || !dev_mac[i]->active)
1766			continue;
1767		rge_start_locked(dev_mac[i]->rge_ifp, RGE_TX_THRESHOLD);
1768	}
1769	atomic_subtract_int(&g_tx_frm_tx_ok, 1);
1770}
1771
1772/* This function is called from an interrupt handler */
1773void
1774rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
1775			    int stid, struct msgrng_msg *msg,
1776			    void *data /* ignored */ )
1777{
1778	uint64_t	phys_addr = 0;
1779	unsigned long	addr = 0;
1780	uint32_t	length = 0;
1781	int		ctrl = 0,	port = 0;
1782	struct rge_softc *sc = NULL;
1783	struct driver_data *priv = 0;
1784	struct ifnet   *ifp;
1785	int		cpu = xlr_cpu_id();
1786	int vcpu=(cpu<<2)+xlr_thr_id();
1787
1788	dbg_msg("mac: bucket=%d, size=%d, code=%d, stid=%d, msg0=%llx msg1=%llx\n",
1789		 bucket, size, code, stid, msg->msg0, msg->msg1);
1790
1791	phys_addr = (uint64_t) (msg->msg0 & 0xffffffffe0ULL);
1792	length = (msg->msg0 >> 40) & 0x3fff;
1793	if (length == 0) {
1794		ctrl = CTRL_REG_FREE;
1795		port = (msg->msg0 >> 54) & 0x0f;
1796		addr = 0;
1797	} else {
1798		ctrl = CTRL_SNGL;
1799		length = length - BYTE_OFFSET - MAC_CRC_LEN;
1800		port = msg->msg0 & 0x0f;
1801		addr = 0;
1802	}
1803
1804	if (xlr_board_info.is_xls) {
1805		if (stid == MSGRNG_STNID_GMAC1)
1806			port += 4;
1807		sc = dev_mac[dev_mac_gmac0 + port];
1808	} else {
1809		if (stid == MSGRNG_STNID_XGS0FR)
1810			sc = dev_mac[dev_mac_xgs0];
1811		else if (stid == MSGRNG_STNID_XGS1FR)
1812			sc = dev_mac[dev_mac_xgs0 + 1];
1813		else
1814			sc = dev_mac[dev_mac_gmac0 + port];
1815	}
1816	if (sc == NULL)
1817		return;
1818	priv = &(sc->priv);
1819
1820	dbg_msg("msg0 = %llx, stid = %d, port = %d, addr=%lx, length=%d, ctrl=%d\n",
1821		msg->msg0, stid, port, addr, length, ctrl);
1822
1823	if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
1824		xlr_rge_tx_ok_done[vcpu]++;
1825		release_tx_desc(msg, 1);
1826		ifp = sc->rge_ifp;
1827		if (ifp->if_drv_flags & IFF_DRV_OACTIVE){
1828			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1829		}
1830		if(atomic_cmpset_int(&g_tx_frm_tx_ok, 0,1))
1831			rge_tx_bkp_func(NULL,0);
1832		xlr_set_counter(NETIF_TX_COMPLETE_CYCLES,
1833				(read_c0_count() - msgrng_msg_cycles));
1834	} else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
1835		/* Rx Packet */
1836		/* struct mbuf *m = 0; */
1837		/* int logical_cpu = 0; */
1838
1839		dbg_msg("Received packet, port = %d\n", port);
1840
1841		/*
1842		 * if num frins to be sent exceeds threshold, wake up the
1843		 * helper thread
1844		 */
1845		atomic_add_int(&(priv->frin_to_be_sent[cpu]), 1);
1846		if ((priv->frin_to_be_sent[cpu]) > MAC_FRIN_TO_BE_SENT_THRESHOLD) {
1847			mac_frin_replenish(NULL);
1848		}
1849
1850		dbg_msg("gmac_%d: rx packet: phys_addr = %llx, length = %x\n",
1851		       priv->instance, phys_addr, length);
1852
1853		mac_stats_add(priv->stats.rx_packets, 1);
1854		mac_stats_add(priv->stats.rx_bytes, length);
1855		xlr_inc_counter(NETIF_RX);
1856		xlr_set_counter(NETIF_RX_CYCLES,
1857				(read_c0_count() - msgrng_msg_cycles));
1858		rge_rx(sc, phys_addr, length);
1859		xlr_rge_rx_done[vcpu]++;
1860	} else {
1861		printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
1862	}
1863
1864}
1865
1866/**********************************************************************
1867 **********************************************************************/
1868static int
1869rge_probe(dev)
1870	device_t	dev;
1871{
1872	/* Always return 0 */
1873	return 0;
1874}
1875
1876volatile unsigned long xlr_debug_enabled;
1877struct callout	rge_dbg_count;
1878static void xlr_debug_count(void *addr)
1879{
1880	struct driver_data *priv = &dev_mac[0]->priv;
1881	/*uint32_t crdt;*/
1882	if(xlr_debug_enabled){
1883		printf("\nAvailRxIn %#x\n",xlr_read_reg(priv->mmio,0x23e));
1884	}
1885	callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1886}
1887
1888
1889static void xlr_tx_q_wakeup(void *addr)
1890{
1891	int i=0;
1892	int j=0;
1893	for(i=0; i<xlr_board_info.gmacports; i++){
1894		if (!dev_mac[i] || !dev_mac[i]->active)
1895			continue;
1896		if((dev_mac[i]->rge_ifp->if_drv_flags) & IFF_DRV_OACTIVE){
1897			for(j=0; j<XLR_MAX_CORE; j++){
1898				if(xlr_tot_avail_p2d[j]) {
1899					dev_mac[i]->rge_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1900					break;
1901				}
1902			}
1903		}
1904	}
1905	callout_reset(&xlr_tx_stop_bkp, 5*hz, xlr_tx_q_wakeup, NULL);
1906}
1907
1908static int
1909rge_attach(dev)
1910	device_t	dev;
1911{
1912	struct ifnet   *ifp;
1913	struct rge_softc *sc;
1914	struct driver_data *priv = 0;
1915	int		ret = 0;
1916	struct xlr_gmac_block_t *gmac_conf = device_get_ivars(dev);
1917
1918	sc = device_get_softc(dev);
1919	sc->rge_dev = dev;
1920
1921	/* Initialize mac's */
1922	sc->unit = device_get_unit(dev);
1923
1924	if (sc->unit > XLR_MAX_MACS) {
1925		ret = ENXIO;
1926		goto out;
1927	}
1928	RGE_LOCK_INIT(sc, device_get_nameunit(dev));
1929
1930	priv = &(sc->priv);
1931	priv->sc = sc;
1932
1933	sc->flags = 0;		/* TODO : fix me up later */
1934
1935	priv->id = sc->unit;
1936	if (gmac_conf->type == XLR_GMAC) {
1937		priv->instance = priv->id;
1938		priv->mmio = (xlr_reg_t *)(xlr_io_base + gmac_conf->baseaddr +
1939					   0x1000 * (sc->unit % 4));
1940		if ((ret = rmi_xlr_gmac_reset(priv)) == -1)
1941			goto out;
1942	} else if (gmac_conf->type == XLR_XGMAC) {
1943		priv->instance = priv->id - xlr_board_info.gmacports;
1944		priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1945	}
1946
1947	if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_VI) {
1948		dbg_msg("Arizona board - offset 4 \n");
1949		priv->mii_mmio = (xlr_reg_t *)(xlr_io_base + XLR_IO_GMAC_4_OFFSET);
1950	} else
1951		priv->mii_mmio = (xlr_reg_t *)(xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1952
1953	priv->pcs_mmio =  (xlr_reg_t *)(xlr_io_base + gmac_conf->baseaddr);
1954	priv->serdes_mmio = (xlr_reg_t *)(xlr_io_base +  XLR_IO_GMAC_0_OFFSET);
1955
1956	sc->base_addr = (unsigned long) priv->mmio;
1957	sc->mem_end = (unsigned long) priv->mmio + XLR_IO_SIZE - 1;
1958
1959	sc->xmit = rge_start;
1960	sc->stop = rge_stop;
1961	sc->get_stats = rmi_xlr_mac_get_stats;
1962	sc->ioctl = rge_ioctl;
1963
1964	/* Initialize the device specific driver data */
1965	mtx_init(&priv->lock, "rge", NULL, MTX_SPIN);
1966
1967	priv->type = gmac_conf->type;
1968
1969	priv->mode = gmac_conf->mode;
1970	if (xlr_board_info.is_xls == 0) {
1971		if (xlr_board_atx_ii() && !xlr_board_atx_ii_b())
1972			priv->phy_addr = priv->instance - 2;
1973		else
1974			priv->phy_addr = priv->instance;
1975		priv->mode = XLR_RGMII;
1976        } else {
1977		if (gmac_conf->mode == XLR_PORT0_RGMII &&
1978		                             priv->instance == 0) {
1979			priv->mode =  XLR_PORT0_RGMII;
1980			priv->phy_addr = 0;
1981		} else  {
1982			priv->mode =  XLR_SGMII;
1983			priv->phy_addr = priv->instance + 16;
1984		}
1985	}
1986
1987	priv->txbucket = gmac_conf->station_txbase + priv->instance % 4;
1988	priv->rfrbucket =  gmac_conf->station_rfr;
1989	priv->spill_configured = 0;
1990
1991	dbg_msg("priv->mmio=%p\n", priv->mmio);
1992
1993	/* Set up ifnet structure */
1994	ifp = sc->rge_ifp = if_alloc(IFT_ETHER);
1995	if (ifp == NULL) {
1996		device_printf(sc->rge_dev, "failed to if_alloc()\n");
1997		rge_release_resources(sc);
1998		ret = ENXIO;
1999		RGE_LOCK_DESTROY(sc);
2000		goto out;
2001	}
2002	ifp->if_softc = sc;
2003	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2004	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2005	ifp->if_ioctl = rge_ioctl;
2006	ifp->if_start = rge_start;
2007	ifp->if_watchdog = rge_watchdog;
2008	ifp->if_init = rge_init;
2009	ifp->if_mtu = ETHERMTU;
2010	ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
2011	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2012	IFQ_SET_READY(&ifp->if_snd);
2013	sc->active = 1;
2014	ifp->if_hwassist = 0;
2015	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;
2016	ifp->if_capenable = ifp->if_capabilities;
2017
2018	/* Initialize the rge_softc */
2019	sc->irq = gmac_conf->baseirq + priv->instance % 4;
2020	sc->rge_irq.r_flags = (u_int) sc->irq;	/* We will use r_flags for
2021						 * storing irq which
2022						 * iodi_setup_intr can check */
2023
2024	ret = bus_setup_intr(dev, &sc->rge_irq, INTR_FAST | INTR_TYPE_NET | INTR_MPSAFE,
2025			     rge_intr, sc, &sc->rge_intrhand);
2026
2027	if (ret) {
2028		rge_detach(dev);
2029		device_printf(sc->rge_dev, "couldn't set up irq\n");
2030		RGE_LOCK_DESTROY(sc);
2031		goto out;
2032	}
2033	xlr_mac_get_hwaddr(sc);
2034	xlr_mac_setup_hwaddr(priv);
2035
2036	dbg_msg("MMIO %08lx, MII %08lx, PCS %08lx, base %08lx PHY %d IRQ %d\n",
2037	       (u_long) priv->mmio, (u_long) priv->mii_mmio, (u_long) priv->pcs_mmio,
2038	       (u_long) sc->base_addr, priv->phy_addr, sc->irq);
2039	dbg_msg("HWADDR %02x:%02x tx %d rfr %d\n", (u_int)sc->dev_addr[4],
2040	       (u_int)sc->dev_addr[5], priv->txbucket, priv->rfrbucket);
2041
2042	/*
2043	 * Set up ifmedia support.
2044	 */
2045	/*
2046	 * Initialize MII/media info.
2047	 */
2048	sc->rge_mii.mii_ifp = ifp;
2049	sc->rge_mii.mii_readreg = rge_mii_read;
2050	sc->rge_mii.mii_writereg = rge_mii_write;
2051	sc->rge_mii.mii_statchg = rmi_xlr_mac_mii_statchg;
2052	ifmedia_init(&sc->rge_mii.mii_media, 0, rmi_xlr_mac_mediachange,
2053		     rmi_xlr_mac_mediastatus);
2054	ifmedia_add(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
2055	ifmedia_set(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO);
2056	sc->rge_mii.mii_media.ifm_media = sc->rge_mii.mii_media.ifm_cur->ifm_media;
2057
2058	/*
2059	 * Call MI attach routine.
2060	 */
2061	ether_ifattach(ifp, sc->dev_addr);
2062
2063	if (priv->type == XLR_GMAC) {
2064		rmi_xlr_gmac_init(priv);
2065	} else if (priv->type == XLR_XGMAC) {
2066		rmi_xlr_xgmac_init(priv);
2067	}
2068	dbg_msg("rge_%d: Phoenix Mac at 0x%p (mtu=%d)\n",
2069		sc->unit, priv->mmio, sc->mtu);
2070	dev_mac[sc->unit] = sc;
2071	if (priv->type == XLR_XGMAC && priv->instance == 0)
2072		dev_mac_xgs0 = sc->unit;
2073	if (priv->type == XLR_GMAC && priv->instance == 0)
2074		dev_mac_gmac0 = sc->unit;
2075
2076	if (!gmac_common_init_done){
2077		mac_common_init();
2078		gmac_common_init_done = 1;
2079		callout_init(&xlr_tx_stop_bkp, CALLOUT_MPSAFE);
2080		callout_reset(&xlr_tx_stop_bkp, hz, xlr_tx_q_wakeup, NULL);
2081		callout_init(&rge_dbg_count, CALLOUT_MPSAFE);
2082//		callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
2083	}
2084	if ((ret = rmi_xlr_mac_open(sc)) == -1) {
2085		RGE_LOCK_DESTROY(sc);
2086		goto out;
2087	}
2088
2089out:
2090	if (ret < 0) {
2091		device_printf(dev, "error - skipping\n");
2092	}
2093	return ret;
2094}
2095
/*
 * Reset the controller.  Currently a no-op placeholder; called from
 * rge_detach() and rge_shutdown().
 */
static void
rge_reset(struct rge_softc *sc)
{
}
2100
2101static int
2102rge_detach(dev)
2103	device_t	dev;
2104{
2105#ifdef FREEBSD_MAC_NOT_YET
2106	struct rge_softc *sc;
2107	struct ifnet   *ifp;
2108
2109	sc = device_get_softc(dev);
2110	ifp = sc->rge_ifp;
2111
2112	RGE_LOCK(sc);
2113	rge_stop(sc);
2114	rge_reset(sc);
2115	RGE_UNLOCK(sc);
2116
2117	ether_ifdetach(ifp);
2118
2119	if (sc->rge_tbi) {
2120		ifmedia_removeall(&sc->rge_ifmedia);
2121	} else {
2122		bus_generic_detach(dev);
2123		device_delete_child(dev, sc->rge_miibus);
2124	}
2125
2126	rge_release_resources(sc);
2127
2128#endif				/* FREEBSD_MAC_NOT_YET */
2129	return (0);
2130}
2131static int
2132rge_suspend(device_t dev)
2133{
2134	struct rge_softc *sc;
2135
2136	sc = device_get_softc(dev);
2137	RGE_LOCK(sc);
2138	rge_stop(sc);
2139	RGE_UNLOCK(sc);
2140
2141	return 0;
2142}
2143
2144static int
2145rge_resume(device_t dev)
2146{
2147	panic("rge_resume(): unimplemented\n");
2148	return 0;
2149}
2150
2151static void
2152rge_release_resources(struct rge_softc *sc)
2153{
2154
2155	if (sc->rge_ifp != NULL)
2156		if_free(sc->rge_ifp);
2157
2158	if (mtx_initialized(&sc->rge_mtx))	/* XXX */
2159		RGE_LOCK_DESTROY(sc);
2160}
/*
 * Per-CPU counters used by the RX_COPY receive path: mbuf cluster
 * allocations that failed / succeeded in rge_rx().
 */
uint32_t	gmac_rx_fail[32];
uint32_t	gmac_rx_pass[32];
2163
2164#ifdef RX_COPY
/*
 * RX_COPY receive path: copy the received frame out of the DMA buffer
 * into a freshly allocated mbuf cluster, then immediately return the
 * original buffer to the MAC's free-descriptor ring.
 *
 * paddr is the physical address of the packet data; the buffer's
 * bookkeeping area lives XLR_CACHELINE_SIZE bytes before it.
 * len is the received frame length.
 */
static void
rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
{
	/*
	 * struct mbuf *m = (struct mbuf *)*(unsigned int *)((char *)addr -
	 * XLR_CACHELINE_SIZE);
	 */
	struct mbuf    *m;
	void           *ptr;
	vm_offset_t	temp;
	struct ifnet   *ifp = sc->rge_ifp;
	unsigned long	msgrng_flags;
	int		cpu = PCPU_GET(cpuid);


	/*
	 * Map the buffer's physical address to a KVA; ptr points at the
	 * start of the packet data proper.
	 */
	temp = lw_40bit_phys((paddr - XLR_CACHELINE_SIZE), 3);
	ptr = (void *)(temp + XLR_CACHELINE_SIZE);
	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL) {
		m->m_len = m->m_pkthdr.len = MCLBYTES;
		m_copyback(m, 0, len + BYTE_OFFSET, ptr);
		/* align the data */
		m->m_data += BYTE_OFFSET;
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;
		gmac_rx_pass[cpu]++;
	} else {
		/* Cluster allocation failed: the frame is dropped. */
		gmac_rx_fail[cpu]++;
	}
	/*
	 * Hand the original buffer back to the MAC as a free RX
	 * descriptor; message-ring access must be bracketed by
	 * enable/disable.
	 */
	msgrng_access_enable(msgrng_flags);
	xlr_mac_send_fr(&sc->priv, paddr, MAX_FRAME_SIZE);
	msgrng_access_disable(msgrng_flags);

#ifdef DUMP_PACKETS
	{
		int		i = 0;
		unsigned char  *buf = (char *)m->m_data;
		printf("Rx Packet: length=%d\n", len);
		for (i = 0; i < 64; i++) {
			if (i && (i % 16) == 0)
				printf("\n");
			printf("%02x ", buf[i]);
		}
		printf("\n");
	}
#endif


	/* Pass the copy up the stack (only if the allocation succeeded). */
	if (m) {
		ifp->if_ipackets++;
		(*ifp->if_input) (ifp, m);
	}
}
2218#else
/*
 * Zero-copy receive path.  The buffer's owning mbuf pointer and a
 * magic cookie (0xf00bad) are stashed in the cache line that precedes
 * the packet data (presumably written by get_buf() — confirm); recover
 * the mbuf and pass it straight up the stack.
 *
 * paddr is the physical address of the received data, len its length.
 */
static void
rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
{
	/*
	 * struct mbuf *m = (struct mbuf *)*(unsigned int *)((char *)addr -
	 * XLR_CACHELINE_SIZE);
	 */
	struct mbuf    *m;
	vm_offset_t	temp;
	unsigned int   mag;
	struct ifnet   *ifp = sc->rge_ifp;

	/* Load the stored mbuf pointer and the magic word following it. */
	temp = lw_40bit_phys((paddr - XLR_CACHELINE_SIZE), 3);
	mag = lw_40bit_phys((paddr - XLR_CACHELINE_SIZE+4), 3);

	m = (struct mbuf *)temp;

	if (mag != 0xf00bad) {
		/* somebody else packet Error - FIXME in intialization */
		/*
		 * NOTE(review): the buffer is neither freed nor returned
		 * to the MAC on this path, so it appears to be leaked —
		 * confirm against the free-descriptor accounting.
		 */
		printf("cpu %d: *ERROR* Not my packet paddr %llx\n", xlr_cpu_id(), paddr);
		return;
	}

	/* align the data */
	m->m_data += BYTE_OFFSET;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = ifp;

#ifdef DUMP_PACKETS
	{
		int		i = 0;
		unsigned char  *buf = (char *)m->m_data;
		printf("Rx Packet: length=%d\n", len);
		for (i = 0; i < 64; i++) {
			if (i && (i % 16) == 0)
				printf("\n");
			printf("%02x ", buf[i]);
		}
		printf("\n");
	}
#endif
	ifp->if_ipackets++;
	(*ifp->if_input) (ifp, m);
}
2263#endif
2264
/*
 * Interrupt handler for MAC error/status interrupts (RX/TX traffic
 * arrives over the message ring, not here).
 *
 * An MDInt (MDIO/PHY) interrupt is shared, so poll every attached
 * GMAC's PHY interrupt-status register (register 26 — presumably
 * reading it also acks the PHY interrupt; confirm against the PHY
 * datasheet) and re-derive link speed.  Any other cause is only
 * logged.  All interrupt bits are then cleared unconditionally.
 */
static void
rge_intr(void *arg)
{
	struct rge_softc *sc = (struct rge_softc *)arg;
	struct driver_data *priv = &(sc->priv);
	xlr_reg_t      *mmio = priv->mmio;
	uint32_t	intreg = xlr_read_reg(mmio, R_INTREG);

	if (intreg & (1 << O_INTREG__MDInt)) {
		uint32_t	phy_int_status = 0;
		int		i = 0;

		for (i = 0; i < XLR_MAX_MACS; i++) {
			struct rge_softc *phy_dev = 0;
			struct driver_data *phy_priv = 0;

			phy_dev = dev_mac[i];
			if (phy_dev == NULL)
				continue;

			phy_priv = &phy_dev->priv;

			/* XGMAC ports have no MDIO PHY to poll. */
			if (phy_priv->type == XLR_XGMAC)
				continue;

			phy_int_status = rge_mii_read_internal(phy_priv->mii_mmio,
							       phy_priv->phy_addr, 26);
			printf("rge%d: Phy addr %d, MII MMIO %lx status %x\n", phy_priv->instance,
			       (int) phy_priv->phy_addr, (u_long)phy_priv->mii_mmio, 	phy_int_status);
			rmi_xlr_gmac_config_speed(phy_priv);
		}
	} else {
		printf("[%s]: mac type = %d, instance %d error "
		       "interrupt: INTREG = 0x%08x\n",
		       __FUNCTION__, priv->type, priv->instance, intreg);
	}

	/* clear all interrupts and hope to make progress */
	xlr_write_reg(mmio, R_INTREG, 0xffffffff);

	/* on A0 and B0, xgmac interrupts are routed only to xgs_1 irq */
	if ((xlr_revision_b0()) && (priv->type == XLR_XGMAC)) {
		struct rge_softc *xgs0_dev = dev_mac[dev_mac_xgs0];
		struct driver_data *xgs0_priv = &xgs0_dev->priv;
		xlr_reg_t      *xgs0_mmio = xgs0_priv->mmio;
		uint32_t	xgs0_intreg = xlr_read_reg(xgs0_mmio, R_INTREG);

		if (xgs0_intreg) {
			printf("[%s]: mac type = %d, instance %d error "
			       "interrupt: INTREG = 0x%08x\n",
			       __FUNCTION__, xgs0_priv->type, xgs0_priv->instance, xgs0_intreg);

			xlr_write_reg(xgs0_mmio, R_INTREG, 0xffffffff);
		}
	}
}
2321
/*
 * Transmit queued packets, at most one per available per-CPU P2D
 * descriptor.  If the MAC refuses a packet (rmi_xlr_mac_xmit() returns
 * nonzero) it is pushed back onto the head of the queue and the
 * interface is marked OACTIVE.
 *
 * The 'threshold' parameter is currently unused.
 * NOTE(review): despite the name, no lock is taken here; presumably
 * the per-CPU descriptor pools make this safe — confirm.
 */
static void
rge_start_locked(struct ifnet *ifp, int threshold)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf    *m = NULL;
	int		prepend_pkt = 0;
	int i=0;
	struct p2d_tx_desc *tx_desc=NULL;
	int cpu = xlr_cpu_id();
	uint32_t vcpu = (cpu<<2)+xlr_thr_id();

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (i=0; i<xlr_tot_avail_p2d[cpu]; i++) {
		if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			return;
		tx_desc = get_p2d_desc();
		if(!tx_desc){
			xlr_rge_get_p2d_failed[vcpu]++;
			return;
		}
		/* Grab a packet off the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL){
			free_p2d_desc(tx_desc);
			return;
		}
		prepend_pkt = rmi_xlr_mac_xmit(m, sc, 0, tx_desc);

		if (prepend_pkt) {
			/* MAC couldn't take it: requeue and back off. */
			xlr_rge_tx_prepend[vcpu]++;
			IF_PREPEND(&ifp->if_snd, m);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			return;
		} else {
			ifp->if_opackets++;
			xlr_rge_tx_done[vcpu]++;
		}
	}
}
2363
/*
 * ifnet if_start entry point: drain the send queue, allowing up to the
 * full TX queue depth in one call.
 */
static void
rge_start(struct ifnet *ifp)
{
	rge_start_locked(ifp, RGE_TX_Q_SIZE);
}
2369
2370static int
2371rge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2372{
2373	struct rge_softc *sc = ifp->if_softc;
2374	struct ifreq   *ifr = (struct ifreq *)data;
2375	int		mask      , error = 0;
2376	/* struct mii_data *mii; */
2377	switch (command) {
2378	case SIOCSIFMTU:
2379		ifp->if_mtu = ifr->ifr_mtu;
2380		error = rmi_xlr_mac_change_mtu(sc, ifr->ifr_mtu);
2381		break;
2382	case SIOCSIFFLAGS:
2383
2384		RGE_LOCK(sc);
2385		if (ifp->if_flags & IFF_UP) {
2386			/*
2387			 * If only the state of the PROMISC flag changed,
2388			 * then just use the 'set promisc mode' command
2389			 * instead of reinitializing the entire NIC. Doing a
2390			 * full re-init means reloading the firmware and
2391			 * waiting for it to start up, which may take a
2392			 * second or two.  Similarly for ALLMULTI.
2393			 */
2394			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2395			    ifp->if_flags & IFF_PROMISC &&
2396			    !(sc->flags & IFF_PROMISC)) {
2397				sc->flags |= IFF_PROMISC;
2398				xlr_mac_set_rx_mode(sc);
2399			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2400				   !(ifp->if_flags & IFF_PROMISC) &&
2401				   sc->flags & IFF_PROMISC) {
2402				sc->flags &= IFF_PROMISC;
2403				xlr_mac_set_rx_mode(sc);
2404			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2405			       (ifp->if_flags ^ sc->flags) & IFF_ALLMULTI) {
2406				rmi_xlr_mac_set_multicast_list(sc);
2407			} else
2408				xlr_mac_set_rx_mode(sc);
2409		} else {
2410			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2411				xlr_mac_set_rx_mode(sc);
2412			}
2413		}
2414		sc->flags = ifp->if_flags;
2415		RGE_UNLOCK(sc);
2416		error = 0;
2417		break;
2418	case SIOCADDMULTI:
2419	case SIOCDELMULTI:
2420		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2421			RGE_LOCK(sc);
2422			rmi_xlr_mac_set_multicast_list(sc);
2423			RGE_UNLOCK(sc);
2424			error = 0;
2425		}
2426		break;
2427	case SIOCSIFMEDIA:
2428	case SIOCGIFMEDIA:
2429		error = ifmedia_ioctl(ifp, ifr,
2430				      &sc->rge_mii.mii_media, command);
2431		break;
2432	case SIOCSIFCAP:
2433		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2434		ifp->if_hwassist = 0;
2435		break;
2436	default:
2437		error = ether_ioctl(ifp, command, data);
2438		break;
2439	}
2440
2441	return (error);
2442}
2443
2444static void
2445rge_init(void *addr)
2446{
2447	struct rge_softc *sc = (struct rge_softc *)addr;
2448	struct ifnet   *ifp;
2449	struct driver_data *priv = &(sc->priv);
2450
2451	ifp = sc->rge_ifp;
2452
2453	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2454		return;
2455	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2456	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2457
2458	rmi_xlr_mac_set_enable(priv, 1);
2459}
2460
/*
 * Stop the interface by disabling the MAC (see rmi_xlr_mac_close()).
 */
static void
rge_stop(struct rge_softc *sc)
{
	rmi_xlr_mac_close(sc);
}
2466
/*
 * if_watchdog hook; intentionally empty — TX hangs are not detected.
 */
static void
rge_watchdog(struct ifnet *sc)
{
}
2471
2472static void
2473rge_shutdown(device_t dev)
2474{
2475	struct rge_softc *sc;
2476	sc = device_get_softc(dev);
2477
2478	RGE_LOCK(sc);
2479	rge_stop(sc);
2480	rge_reset(sc);
2481	RGE_UNLOCK(sc);
2482
2483	return;
2484}
2485
2486static int
2487rmi_xlr_mac_open(struct rge_softc *sc)
2488{
2489	struct driver_data *priv = &(sc->priv);
2490	int i;
2491
2492	dbg_msg("IN\n");
2493
2494	if (rmi_xlr_mac_fill_rxfr(sc)) {
2495		return -1;
2496	}
2497
2498	mtx_lock_spin(&priv->lock);
2499
2500	xlr_mac_set_rx_mode(sc);
2501
2502	if (sc->unit == xlr_board_info.gmacports - 1) {
2503		printf("Enabling MDIO interrupts\n");
2504		struct rge_softc *tmp = NULL;
2505		for (i = 0; i < xlr_board_info.gmacports; i++) {
2506			tmp = dev_mac[i];
2507			if (tmp)
2508				xlr_write_reg(tmp->priv.mmio, R_INTMASK,
2509					      ((tmp->priv.instance == 0) << O_INTMASK__MDInt));
2510		}
2511	}
2512
2513	/*
2514	 * Configure the speed, duplex, and flow control
2515	 */
2516	rmi_xlr_mac_set_speed(priv, priv->speed);
2517	rmi_xlr_mac_set_duplex(priv, priv->duplex, priv->flow_ctrl);
2518	rmi_xlr_mac_set_enable(priv, 0);
2519
2520	mtx_unlock_spin(&priv->lock);
2521
2522	for (i = 0; i < 8; i++) {
2523		atomic_set_int(&(priv->frin_to_be_sent[i]), 0);
2524	}
2525
2526	return 0;
2527}
2528
2529/**********************************************************************
2530 **********************************************************************/
2531static int
2532rmi_xlr_mac_close(struct rge_softc *sc)
2533{
2534	struct driver_data *priv = &(sc->priv);
2535
2536	mtx_lock_spin(&priv->lock);
2537
2538	/*
2539	 * There may have left over mbufs in the ring as well as in free in
2540	 * they will be reused next time open is called
2541	 */
2542
2543	rmi_xlr_mac_set_enable(priv, 0);
2544
2545	xlr_inc_counter(NETIF_STOP_Q);
2546	port_inc_counter(priv->instance, PORT_STOPQ);
2547
2548	mtx_unlock_spin(&priv->lock);
2549
2550	return 0;
2551}
2552
2553/**********************************************************************
2554 **********************************************************************/
2555static struct rge_softc_stats *
2556rmi_xlr_mac_get_stats(struct rge_softc *sc)
2557{
2558	struct driver_data *priv = &(sc->priv);
2559	/* unsigned long flags; */
2560
2561	mtx_lock_spin(&priv->lock);
2562
2563	/* XXX update other stats here */
2564
2565	mtx_unlock_spin(&priv->lock);
2566
2567	return &priv->stats;
2568}
2569
2570/**********************************************************************
2571 **********************************************************************/
/*
 * Program the hardware multicast filter; currently a no-op — the RX
 * filter is left with ALL_MCAST_EN set by xlr_mac_setup_hwaddr().
 */
static void
rmi_xlr_mac_set_multicast_list(struct rge_softc *sc)
{
}
2576
2577/**********************************************************************
2578 **********************************************************************/
2579static int
2580rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu)
2581{
2582	struct driver_data *priv = &(sc->priv);
2583
2584	if ((new_mtu > 9500) || (new_mtu < 64)) {
2585		return -EINVAL;
2586	}
2587	mtx_lock_spin(&priv->lock);
2588
2589	sc->mtu = new_mtu;
2590
2591	/* Disable MAC TX/RX */
2592	rmi_xlr_mac_set_enable(priv, 0);
2593
2594	/* Flush RX FR IN */
2595	/* Flush TX IN */
2596	rmi_xlr_mac_set_enable(priv, 1);
2597
2598	mtx_unlock_spin(&priv->lock);
2599	return 0;
2600}
2601
2602/**********************************************************************
2603 **********************************************************************/
/*
 * Populate the MAC's free-descriptor (FR IN) ring with MAX_NUM_DESC
 * receive buffers.  Runs only once per port, guarded by
 * init_frin_desc.  Returns 0 on success or -ENOMEM if a buffer
 * allocation fails partway (buffers already handed to the MAC stay
 * there).
 */
static int
rmi_xlr_mac_fill_rxfr(struct rge_softc *sc)
{
	struct driver_data *priv = &(sc->priv);
	unsigned long	msgrng_flags;
	int		i;
	int		ret = 0;
	void           *ptr;

	dbg_msg("\n");
	if (!priv->init_frin_desc)
		return ret;
	priv->init_frin_desc = 0;

	dbg_msg("\n");
	for (i = 0; i < MAX_NUM_DESC; i++) {
		ptr = get_buf();
		if (!ptr) {
			ret = -ENOMEM;
			break;
		}

		/* Send the free Rx desc to the MAC */
		msgrng_access_enable(msgrng_flags);
		xlr_mac_send_fr(priv, vtophys(ptr), MAX_FRAME_SIZE);
		msgrng_access_disable(msgrng_flags);
	}

	return ret;
}
2634
2635/**********************************************************************
2636 **********************************************************************/
/*
 * Allocate a cache-line aligned, physically contiguous spill area and
 * program its physical address (split across two registers, in
 * 32-byte units per the >>5 shift) and size into the MAC.  Panics on
 * allocation failure or misalignment.  Returns the KVA of the spill
 * buffer; the memory is never freed while the MAC uses it.
 */
static __inline__ void *
rmi_xlr_config_spill(xlr_reg_t * mmio,
		     int reg_start_0, int reg_start_1,
		     int reg_size, int size)
{
	uint32_t	spill_size = size;
	void           *spill = NULL;
	uint64_t	phys_addr = 0;


	spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
		   M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
	if (!spill || ((vm_offset_t) spill & (XLR_CACHELINE_SIZE - 1))) {
		panic("Unable to allocate memory for spill area!\n");
	}
	phys_addr = vtophys(spill);
	dbg_msg("Allocate spill %d bytes at %llx\n", size, phys_addr);
	/* Low 32 bits (>>5) and high 3 bits (>>37) of the address. */
	xlr_write_reg(mmio, reg_start_0, (phys_addr >> 5) & 0xffffffff);
	xlr_write_reg(mmio, reg_start_1, (phys_addr >> 37) & 0x07);
	xlr_write_reg(mmio, reg_size, spill_size);

	return spill;
}
2660
2661static void
2662rmi_xlr_config_spill_area(struct driver_data *priv)
2663{
2664	/*
2665	 * if driver initialization is done parallely on multiple cpus
2666	 * spill_configured needs synchronization
2667	 */
2668	if (priv->spill_configured)
2669		return;
2670
2671	if (priv->type == XLR_GMAC && priv->instance % 4 != 0) {
2672		priv->spill_configured = 1;
2673		return;
2674	}
2675
2676	priv->spill_configured = 1;
2677
2678	priv->frin_spill =
2679		rmi_xlr_config_spill(priv->mmio,
2680				     R_REG_FRIN_SPILL_MEM_START_0,
2681				     R_REG_FRIN_SPILL_MEM_START_1,
2682				     R_REG_FRIN_SPILL_MEM_SIZE,
2683				     MAX_FRIN_SPILL *
2684				     sizeof(struct fr_desc));
2685
2686	priv->class_0_spill =
2687		rmi_xlr_config_spill(priv->mmio,
2688				     R_CLASS0_SPILL_MEM_START_0,
2689				     R_CLASS0_SPILL_MEM_START_1,
2690				     R_CLASS0_SPILL_MEM_SIZE,
2691				     MAX_CLASS_0_SPILL *
2692				     sizeof(union rx_tx_desc));
2693	priv->class_1_spill =
2694		rmi_xlr_config_spill(priv->mmio,
2695				     R_CLASS1_SPILL_MEM_START_0,
2696				     R_CLASS1_SPILL_MEM_START_1,
2697				     R_CLASS1_SPILL_MEM_SIZE,
2698				     MAX_CLASS_1_SPILL *
2699				     sizeof(union rx_tx_desc));
2700
2701	priv->frout_spill =
2702		rmi_xlr_config_spill(priv->mmio, R_FROUT_SPILL_MEM_START_0,
2703				     R_FROUT_SPILL_MEM_START_1,
2704				     R_FROUT_SPILL_MEM_SIZE,
2705				     MAX_FROUT_SPILL *
2706				     sizeof(struct fr_desc));
2707
2708	priv->class_2_spill =
2709		rmi_xlr_config_spill(priv->mmio,
2710				     R_CLASS2_SPILL_MEM_START_0,
2711				     R_CLASS2_SPILL_MEM_START_1,
2712				     R_CLASS2_SPILL_MEM_SIZE,
2713				     MAX_CLASS_2_SPILL *
2714				     sizeof(union rx_tx_desc));
2715	priv->class_3_spill =
2716		rmi_xlr_config_spill(priv->mmio,
2717				     R_CLASS3_SPILL_MEM_START_0,
2718				     R_CLASS3_SPILL_MEM_START_1,
2719				     R_CLASS3_SPILL_MEM_SIZE,
2720				     MAX_CLASS_3_SPILL *
2721				     sizeof(union rx_tx_desc));
2722	priv->spill_configured = 1;
2723}
2724
2725/*****************************************************************
2726 * Write the MAC address to the XLR registers
2727 * All 4 addresses are the same for now
2728 *****************************************************************/
/*
 * Program the station MAC address into the XLR address registers and
 * enable filtering on it, plus broadcast and all-multicast frames.
 * The six address bytes are packed low-byte-first into the two ADDR0
 * words; the MASK2/MASK3 registers are set to match all address bits.
 */
static void
xlr_mac_setup_hwaddr(struct driver_data *priv)
{
	struct rge_softc *sc = priv->sc;

	xlr_write_reg(priv->mmio, R_MAC_ADDR0,
		      ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16)
		       | (sc->dev_addr[3] << 8) | (sc->dev_addr[2]))
		);

	xlr_write_reg(priv->mmio, R_MAC_ADDR0 + 1,
		      ((sc->dev_addr[1] << 24) | (sc->
						  dev_addr[0] << 16)));

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2 + 1, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3 + 1, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG,
		      (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
		      (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
		      (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)
		);
}
2757
2758/*****************************************************************
2759 * Read the MAC address from the XLR registers
2760 * All 4 addresses are the same for now
2761 *****************************************************************/
2762static void
2763xlr_mac_get_hwaddr(struct rge_softc *sc)
2764{
2765	struct driver_data *priv = &(sc->priv);
2766
2767	sc->dev_addr[0] = (xlr_boot1_info.mac_addr >> 40) & 0xff;
2768	sc->dev_addr[1] = (xlr_boot1_info.mac_addr >> 32) & 0xff;
2769	sc->dev_addr[2] = (xlr_boot1_info.mac_addr >> 24) & 0xff;
2770	sc->dev_addr[3] = (xlr_boot1_info.mac_addr >> 16) & 0xff;
2771	sc->dev_addr[4] = (xlr_boot1_info.mac_addr >> 8) & 0xff;
2772	sc->dev_addr[5] = ((xlr_boot1_info.mac_addr >> 0) & 0xff) + priv->instance;
2773}
2774
2775/*****************************************************************
2776 * Mac Module Initialization
2777 *****************************************************************/
/*
 * One-time initialization shared by all MAC ports: set up the P2D
 * descriptor allocator and TX ring (and the RX copy buffers when
 * RX_COPY is enabled), then register message-ring handlers for the
 * GMAC transmit stations — two stations on XLS boards, one otherwise.
 * XGMAC station registration is compiled out (notyet).
 */
static void
mac_common_init(void)
{
	init_p2d_allocation();
	init_tx_ring();
#ifdef RX_COPY
	init_rx_buf();
#endif

	if (xlr_board_info.is_xls) {
		if (register_msgring_handler (TX_STN_GMAC0,
					      rmi_xlr_mac_msgring_handler, NULL)) {
			panic("Couldn't register msgring handler\n");
		}
		if (register_msgring_handler (TX_STN_GMAC1,
					      rmi_xlr_mac_msgring_handler, NULL)) {
			panic("Couldn't register msgring handler\n");
		}
	} else {
		if (register_msgring_handler (TX_STN_GMAC,
					      rmi_xlr_mac_msgring_handler, NULL)) {
			panic("Couldn't register msgring handler\n");
		}
	}

#if notyet
	if (xlr_board_atx_ii()) {
		if (register_msgring_handler
		    (TX_STN_XGS_0, rmi_xlr_mac_msgring_handler, NULL)) {
			panic("Couldn't register msgring handler for TX_STN_XGS_0\n");
		}
		if (register_msgring_handler
		    (TX_STN_XGS_1, rmi_xlr_mac_msgring_handler, NULL)) {
			panic("Couldn't register msgring handler for TX_STN_XGS_1\n");
		}
	}
#endif
}
2816