1/*-
2 * Copyright (c) 2003-2009 RMI Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
14 *    may be used to endorse or promote products derived from this software
15 *    without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * RMI_BSD */
30
/*
 * The XLR device supports up to four 10/100/1000 Ethernet MACs and up to
 * two 10G Ethernet MACs (of the XGMII type). Alternatively, each 10G port
 * can be used as a SPI-4 interface, with 8 ports per such interface. The
 * MACs are encapsulated in another hardware block referred to as a network
 * accelerator; there are three instances of these in an XLR. One of them
 * controls the four 1G RGMII ports, while each of the others controls one
 * XGMII port. Enabling MACs requires configuring the corresponding network
 * accelerator and the individual port.
 * The XLS device supports up to eight 10/100/1000 Ethernet MACs or at most
 * two 10G Ethernet MACs. Its 1G MACs have an SGMII interface and its 10G
 * MACs have a XAUI interface. These ports are part of two network
 * accelerators.
 * The nlge driver configures and initializes the non-SPI4 Ethernet ports
 * of the XLR/XLS devices and enables data transfer on them.
 */
46
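/*
 * Driver structure (see the DRIVER_MODULE() declarations below): one
 * "nlna" device attaches under the iodi bus per network accelerator,
 * and each Ethernet port attaches under its nlna parent as an "nlge"
 * child device, which in turn hosts a miibus for its PHY.
 */
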
47#include <sys/cdefs.h>
48__FBSDID("$FreeBSD$");
49
50#ifdef HAVE_KERNEL_OPTION_HEADERS
51#include "opt_device_polling.h"
52#endif
53
54#include <sys/endian.h>
55#include <sys/systm.h>
56#include <sys/sockio.h>
57#include <sys/param.h>
58#include <sys/lock.h>
59#include <sys/mutex.h>
60#include <sys/proc.h>
61#include <sys/limits.h>
62#include <sys/bus.h>
63#include <sys/mbuf.h>
64#include <sys/malloc.h>
65#include <sys/kernel.h>
66#include <sys/module.h>
67#include <sys/socket.h>
68#define __RMAN_RESOURCE_VISIBLE
69#include <sys/rman.h>
70#include <sys/taskqueue.h>
71#include <sys/smp.h>
72#include <sys/sysctl.h>
73
74#include <net/if.h>
75#include <net/if_arp.h>
76#include <net/ethernet.h>
77#include <net/if_dl.h>
78#include <net/if_media.h>
79#include <net/bpf.h>
80#include <net/if_types.h>
81#include <net/if_vlan_var.h>
82
83#include <netinet/in_systm.h>
84#include <netinet/in.h>
85#include <netinet/ip.h>
86
87#include <vm/vm.h>
88#include <vm/pmap.h>
89#include <vm/uma.h>
90
91#include <machine/reg.h>
92#include <machine/cpu.h>
93#include <machine/mips_opcode.h>
94#include <machine/asm.h>
95#include <machine/cpuregs.h>
96#include <machine/param.h>
97#include <machine/intr_machdep.h>
98#include <machine/clock.h>	/* for DELAY */
99#include <machine/bus.h>
100#include <machine/resource.h>
101
102#include <mips/rmi/interrupt.h>
103#include <mips/rmi/msgring.h>
104#include <mips/rmi/iomap.h>
105#include <mips/rmi/pic.h>
106#include <mips/rmi/board.h>
107#include <mips/rmi/rmi_mips_exts.h>
108#include <mips/rmi/rmi_boot_info.h>
109#include <mips/rmi/dev/xlr/atx_cpld.h>
110#include <mips/rmi/dev/xlr/xgmac_mdio.h>
111
112#include <dev/mii/mii.h>
113#include <dev/mii/miivar.h>
114#include "miidevs.h"
115#include <dev/mii/brgphyreg.h>
116#include "miibus_if.h"
117
118#include <mips/rmi/dev/nlge/if_nlge.h>
119
120MODULE_DEPEND(nlna, nlge, 1, 1, 1);
121MODULE_DEPEND(nlge, ether, 1, 1, 1);
122MODULE_DEPEND(nlge, miibus, 1, 1, 1);
123
/* Network accelerator entry points */
125static int      nlna_probe(device_t);
126static int      nlna_attach(device_t);
127static int      nlna_detach(device_t);
128static int      nlna_suspend(device_t);
129static int      nlna_resume(device_t);
130static int 	nlna_shutdown(device_t);
131
132/* GMAC port entry points */
133static int	nlge_probe(device_t);
134static int	nlge_attach(device_t);
135static int	nlge_detach(device_t);
136static int	nlge_suspend(device_t);
137static int	nlge_resume(device_t);
138static void	nlge_init(void *);
139static int	nlge_ioctl(struct ifnet *, u_long, caddr_t);
140static int	nlge_tx(struct ifnet *ifp, struct mbuf *m);
141static void 	nlge_rx(struct nlge_softc *sc, vm_paddr_t paddr, int len);
142
143static int	nlge_mii_write(struct device *, int, int, int);
144static int	nlge_mii_read(struct device *, int, int);
145static void	nlge_mac_mii_statchg(device_t);
146static int	nlge_mediachange(struct ifnet *ifp);
147static void	nlge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
148
149/* Other internal/helper functions */
150static void 	*get_buf(void);
151
152static void	nlna_add_to_port_set(struct nlge_port_set *pset,
153    struct nlge_softc *sc);
154static void	nlna_config_pde(struct nlna_softc *);
155static void	nlna_config_parser(struct nlna_softc *);
156static void	nlna_config_classifier(struct nlna_softc *);
157static void	nlna_config_fifo_spill_area(struct nlna_softc *sc);
158static void	nlna_config_translate_table(struct nlna_softc *sc);
159static void	nlna_config_common(struct nlna_softc *);
160static void	nlna_disable_ports(struct nlna_softc *sc);
161static void	nlna_enable_intr(struct nlna_softc *sc);
162static void	nlna_disable_intr(struct nlna_softc *sc);
163static void	nlna_enable_ports(struct nlna_softc *sc);
164static void	nlna_get_all_softc(device_t iodi_dev,
165    struct nlna_softc **sc_vec, uint32_t vec_sz);
166static void 	nlna_hw_init(struct nlna_softc *sc);
167static int 	nlna_is_last_active_na(struct nlna_softc *sc);
168static void	nlna_media_specific_config(struct nlna_softc *sc);
169static void 	nlna_reset_ports(struct nlna_softc *sc,
170    struct xlr_gmac_block_t *blk);
171static struct nlna_softc *nlna_sc_init(device_t dev,
172    struct xlr_gmac_block_t *blk);
173static void	nlna_setup_intr(struct nlna_softc *sc);
174static void	nlna_smp_update_pde(void *dummy __unused);
175static void	nlna_submit_rx_free_desc(struct nlna_softc *sc,
176    uint32_t n_desc);
177
178static int	nlge_gmac_config_speed(struct nlge_softc *, int quick);
179static void	nlge_hw_init(struct nlge_softc *sc);
180static int	nlge_if_init(struct nlge_softc *sc);
181static void	nlge_intr(void *arg);
182static int	nlge_irq_init(struct nlge_softc *sc);
183static void	nlge_irq_fini(struct nlge_softc *sc);
184static void	nlge_media_specific_init(struct nlge_softc *sc);
185static void	nlge_mii_init(device_t dev, struct nlge_softc *sc);
186static int	nlge_mii_read_internal(xlr_reg_t *mii_base, int phyaddr,
187    int regidx);
188static void 	nlge_mii_write_internal(xlr_reg_t *mii_base, int phyaddr,
189    int regidx, int regval);
190void 		nlge_msgring_handler(int bucket, int size, int code,
191    int stid, struct msgrng_msg *msg, void *data);
192static void 	nlge_port_disable(struct nlge_softc *sc);
193static void 	nlge_port_enable(struct nlge_softc *sc);
194static void 	nlge_read_mac_addr(struct nlge_softc *sc);
195static void	nlge_sc_init(struct nlge_softc *sc, device_t dev,
196    struct xlr_gmac_port *port_info);
197static void 	nlge_set_mac_addr(struct nlge_softc *sc);
198static void	nlge_set_port_attribs(struct nlge_softc *,
199    struct xlr_gmac_port *);
200static void	nlge_mac_set_rx_mode(struct nlge_softc *sc);
201static void 	nlge_sgmii_init(struct nlge_softc *sc);
202static int 	nlge_start_locked(struct ifnet *ifp, struct nlge_softc *sc,
203    struct mbuf *m);
204
205static int	prepare_fmn_message(struct nlge_softc *sc,
206    struct msgrng_msg *msg, uint32_t *n_entries, struct mbuf *m_head,
207    uint64_t fr_stid, struct nlge_tx_desc **tx_desc);
208
209static void 	release_tx_desc(vm_paddr_t phy_addr);
210static int	send_fmn_msg_tx(struct nlge_softc *, struct msgrng_msg *,
211    uint32_t n_entries);
212
213//#define DEBUG
214#ifdef DEBUG
215static int	mac_debug = 1;
216#undef PDEBUG
#define PDEBUG(fmt, args...) \
        do {\
            if (mac_debug) {\
                printf("[%s@%d|%s]: cpu_%d: " fmt, \
                __FILE__, __LINE__, __func__, PCPU_GET(cpuid), ##args);\
            }\
        } while (0)
224
225/* Debug/dump functions */
226static void 	dump_reg(xlr_reg_t *addr, uint32_t offset, char *name);
227static void	dump_gmac_registers(struct nlge_softc *);
228static void	dump_na_registers(xlr_reg_t *base, int port_id);
229static void	dump_mac_stats(struct nlge_softc *sc);
230static void 	dump_mii_regs(struct nlge_softc *sc) __attribute__((used));
231static void 	dump_mii_data(struct mii_data *mii) __attribute__((used));
232static void	dump_board_info(struct xlr_board_info *);
233static void	dump_pcs_regs(struct nlge_softc *sc, int phy);
234
235#else
236#undef PDEBUG
237#define PDEBUG(fmt, args...)
238#define dump_reg(a, o, n)		/* nop */
239#define dump_gmac_registers(a)		/* nop */
240#define dump_na_registers(a, p)	/* nop */
241#define dump_board_info(b)		/* nop */
242#define dump_mac_stats(sc)		/* nop */
243#define dump_mii_regs(sc)		/* nop */
244#define dump_mii_data(mii)		/* nop */
245#define dump_pcs_regs(sc, phy)		/* nop */
246#endif
247
248/* Wrappers etc. to export the driver entry points. */
249static device_method_t nlna_methods[] = {
250	/* Device interface */
251	DEVMETHOD(device_probe,         nlna_probe),
252	DEVMETHOD(device_attach,        nlna_attach),
253	DEVMETHOD(device_detach,        nlna_detach),
254	DEVMETHOD(device_shutdown,      nlna_shutdown),
255	DEVMETHOD(device_suspend,       nlna_suspend),
256	DEVMETHOD(device_resume,        nlna_resume),
257
	/* Bus interface; TBD: what are these for? */
259	DEVMETHOD(bus_setup_intr,       bus_generic_setup_intr),
260
261	DEVMETHOD_END
262};
263
264static driver_t	nlna_driver = {
265	"nlna",
266	nlna_methods,
267	sizeof(struct nlna_softc)
268};
269
270static devclass_t nlna_devclass;
271
272static device_method_t nlge_methods[] = {
273	/* Device interface */
274	DEVMETHOD(device_probe,         nlge_probe),
275	DEVMETHOD(device_attach,        nlge_attach),
276	DEVMETHOD(device_detach,        nlge_detach),
277	DEVMETHOD(device_shutdown,      bus_generic_shutdown),
278	DEVMETHOD(device_suspend,       nlge_suspend),
279	DEVMETHOD(device_resume,        nlge_resume),
280
281	/* MII interface */
282	DEVMETHOD(miibus_readreg, nlge_mii_read),
283	DEVMETHOD(miibus_writereg, nlge_mii_write),
284	DEVMETHOD(miibus_statchg, nlge_mac_mii_statchg),
285
	DEVMETHOD_END
287};
288
289static driver_t	nlge_driver = {
290	"nlge",
291	nlge_methods,
292	sizeof(struct nlge_softc)
293};
294
295static devclass_t nlge_devclass;
296
297DRIVER_MODULE(nlna, iodi, nlna_driver, nlna_devclass, 0, 0);
298DRIVER_MODULE(nlge, nlna,  nlge_driver, nlge_devclass, 0, 0);
299DRIVER_MODULE(miibus, nlge, miibus_driver, miibus_devclass, 0, 0);
300
301static uma_zone_t nl_tx_desc_zone;
302
303/* Tunables. */
304static int flow_classification = 0;
305TUNABLE_INT("hw.nlge.flow_classification", &flow_classification);
306
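/*
 * With NLGE_HW_CHKSUM defined, the NA prepends its parser results to each
 * rx packet (PrePadEnable in nlge_hw_init() plus the L3/L4 parser setup in
 * nlna_config_parser()), and nlge_rx() turns those bits into mbuf checksum
 * flags.
 */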
307#define	NLGE_HW_CHKSUM		1
308
309static __inline void
310atomic_incr_long(unsigned long *addr)
311{
312	/* XXX: fix for 64 bit */
313	unsigned int *iaddr = (unsigned int *)addr;
314
315	xlr_ldaddwu(1, iaddr);
316}
317
318static int
319nlna_probe(device_t dev)
320{
321	return (BUS_PROBE_DEFAULT);
322}
323
/*
 * Add all attached GMAC/XGMAC ports to the device tree. Port
 * configuration is spread across two regions - common configuration
 * for all ports in the NA and per-port configuration in a MAC-specific
 * region. This function does the following:
 *  - adds the ports to the device tree
 *  - resets the ports
 *  - does all the common initialization
 *  - invokes bus_generic_attach for per-port configuration
 *  - supplies the initial free rx descriptors to the ports
 *  - initializes the s/w data structures
 *  - finally, enables interrupts (only in the last NA).
336 *
337 * For reference, sample address space for common and per-port
338 * registers is given below.
339 *
340 * The address map for RNA0 is:                           (typical value)
341 *
342 * XLR_IO_BASE +--------------------------------------+   0xbef0_0000
343 *             |                                      |
344 *             |                                      |
345 *             |                                      |
346 *             |                                      |
347 *             |                                      |
348 *             |                                      |
349 * GMAC0  ---> +--------------------------------------+   0xbef0_c000
350 *             |                                      |
351 *             |                                      |
352 * (common) -> |......................................|   0xbef0_c400
353 *             |                                      |
354 *             |   (RGMII/SGMII: common registers)    |
355 *             |                                      |
356 * GMAC1  ---> |--------------------------------------|   0xbef0_d000
357 *             |                                      |
358 *             |                                      |
359 * (common) -> |......................................|   0xbef0_d400
360 *             |                                      |
361 *             |   (RGMII/SGMII: common registers)    |
362 *             |                                      |
363 *             |......................................|
364 *       and so on ....
365 *
366 * Ref: Figure 14-3 and Table 14-1 of XLR PRM
367 */
368static int
369nlna_attach(device_t dev)
370{
371	struct xlr_gmac_block_t *block_info;
372	device_t		 gmac_dev;
373	struct nlna_softc	*sc;
374	int			 error;
375	int			 i;
376	int			 id;
377
378	id = device_get_unit(dev);
379	block_info = device_get_ivars(dev);
380	if (!block_info->enabled) {
381		return 0;
382	}
383
384#ifdef DEBUG
385	dump_board_info(&xlr_board_info);
386#endif
387	/* Initialize nlna state in softc structure */
388	sc = nlna_sc_init(dev, block_info);
389
	/* Add devices for the ports controlled by this NA. */
391	if (block_info->type == XLR_GMAC) {
		KASSERT(id < 2, ("No GMACs supported with this network"
		    " accelerator: %d", id));
394		for (i = 0; i < sc->num_ports; i++) {
395			gmac_dev = device_add_child(dev, "nlge", -1);
396			device_set_ivars(gmac_dev, &block_info->gmac_port[i]);
397		}
398	} else if (block_info->type == XLR_XGMAC) {
		KASSERT(id > 0 && id <= 2, ("No XGMACs supported with this"
		    " network accelerator: %d", id));
401		gmac_dev = device_add_child(dev, "nlge", -1);
402		device_set_ivars(gmac_dev, &block_info->gmac_port[0]);
403	} else if (block_info->type == XLR_SPI4) {
404		/* SPI4 is not supported here */
		device_printf(dev, "Unsupported: NA with SPI4 type\n");
406		return (ENOTSUP);
407	}
408
409	nlna_reset_ports(sc, block_info);
410
	/* Initialize Network Accelerator registers. */
412	nlna_hw_init(sc);
413
414	error = bus_generic_attach(dev);
415	if (error) {
416		device_printf(dev, "failed to attach port(s)\n");
417		goto fail;
418	}
419
420	/* Send out the initial pool of free-descriptors for the rx path */
421	nlna_submit_rx_free_desc(sc, MAX_FRIN_SPILL);
422
	/* S/w data structure initializations shared by all NAs. */
424	if (nl_tx_desc_zone == NULL) {
425		/* Create a zone for allocating tx descriptors */
426		nl_tx_desc_zone = uma_zcreate("NL Tx Desc",
427		    sizeof(struct nlge_tx_desc), NULL, NULL, NULL, NULL,
428		    XLR_CACHELINE_SIZE, 0);
429	}
430
431	/* Enable NA interrupts */
432	nlna_setup_intr(sc);
433
434	return (0);
435
436fail:
437	return (error);
438}
439
440static int
441nlna_detach(device_t dev)
442{
443	struct nlna_softc *sc;
444
445	sc = device_get_softc(dev);
446	if (device_is_alive(dev)) {
447		nlna_disable_intr(sc);
448		/* This will make sure that per-port detach is complete
449		 * and all traffic on the ports has been stopped. */
450		bus_generic_detach(dev);
451		uma_zdestroy(nl_tx_desc_zone);
452	}
453
454	return (0);
455}
456
457static int
458nlna_suspend(device_t dev)
459{
460
461	return (0);
462}
463
464static int
465nlna_resume(device_t dev)
466{
467
468	return (0);
469}
470
471static int
472nlna_shutdown(device_t dev)
473{
474	return (0);
475}
476
477
478/* GMAC port entry points */
479static int
480nlge_probe(device_t dev)
481{
482	struct nlge_softc	*sc;
483	struct xlr_gmac_port	*port_info;
484	int index;
485	char *desc[] = { "RGMII", "SGMII", "RGMII/SGMII", "XGMAC", "XAUI",
486	    "Unknown"};
487
488	port_info = device_get_ivars(dev);
489	index = (port_info->type < XLR_RGMII || port_info->type > XLR_XAUI) ?
490	    5 : port_info->type;
491	device_set_desc_copy(dev, desc[index]);
492
493	sc = device_get_softc(dev);
494	nlge_sc_init(sc, dev, port_info);
495
496	nlge_port_disable(sc);
497
498	return (0);
499}
500
501static int
502nlge_attach(device_t dev)
503{
504	struct nlge_softc *sc;
505	struct nlna_softc *nsc;
506	int error;
507
508	sc = device_get_softc(dev);
509
510	nlge_if_init(sc);
511	nlge_mii_init(dev, sc);
512	error = nlge_irq_init(sc);
513	if (error)
514		return error;
515	nlge_hw_init(sc);
516
517	nsc = (struct nlna_softc *)device_get_softc(device_get_parent(dev));
518	nsc->child_sc[sc->instance] = sc;
519
520	return (0);
521}
522
523static int
524nlge_detach(device_t dev)
525{
526	struct nlge_softc *sc;
527	struct ifnet   *ifp;
528
529	sc = device_get_softc(dev);
530	ifp = sc->nlge_if;
531
532	if (device_is_attached(dev)) {
533		nlge_port_disable(sc);
534		nlge_irq_fini(sc);
535		ether_ifdetach(ifp);
536		bus_generic_detach(dev);
537	}
538	if (ifp)
539		if_free(ifp);
540
541	return (0);
542}
543
544static int
545nlge_suspend(device_t dev)
546{
547	return (0);
548}
549
550static int
551nlge_resume(device_t dev)
552{
553	return (0);
554}
555
556static void
557nlge_init(void *addr)
558{
559	struct nlge_softc *sc;
560	struct ifnet   *ifp;
561
562	sc = (struct nlge_softc *)addr;
563	ifp = sc->nlge_if;
564
565	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
566		return;
567
568	nlge_gmac_config_speed(sc, 1);
569	ifp->if_drv_flags |= IFF_DRV_RUNNING;
570	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
571	nlge_port_enable(sc);
572
573	if (sc->port_type == XLR_SGMII) {
574		dump_pcs_regs(sc, 27);
575	}
576	dump_gmac_registers(sc);
577	dump_mac_stats(sc);
578}
579
580static int
581nlge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
582{
583	struct mii_data 	*mii;
584	struct nlge_softc 	*sc;
585	struct ifreq 		*ifr;
586	int 			error;
587
588	sc = ifp->if_softc;
589	error = 0;
590	ifr = (struct ifreq *)data;
591
592	switch(command) {
593	case SIOCSIFFLAGS:
594		NLGE_LOCK(sc);
595		if (ifp->if_flags & IFF_UP) {
596			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
597				nlge_init(sc);
598			}
599			if (ifp->if_flags & IFF_PROMISC &&
600			    !(sc->if_flags & IFF_PROMISC)) {
601				sc->if_flags |= IFF_PROMISC;
602				nlge_mac_set_rx_mode(sc);
603			} else if (!(ifp->if_flags & IFF_PROMISC) &&
604			    sc->if_flags & IFF_PROMISC) {
				sc->if_flags &= ~IFF_PROMISC;
606				nlge_mac_set_rx_mode(sc);
607			}
608		} else {
609			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
610				nlge_port_disable(sc);
611			}
612		}
613		sc->if_flags = ifp->if_flags;
614		NLGE_UNLOCK(sc);
615		error = 0;
616		break;
617
618	case SIOCSIFMEDIA:
619	case SIOCGIFMEDIA:
620		if (sc->mii_bus != NULL) {
621			mii = (struct mii_data *)device_get_softc(sc->mii_bus);
622			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
623			    command);
624		}
625		break;
626
627	default:
628		error = ether_ioctl(ifp, command, data);
629		break;
630	}
631
632	return (error);
633}
634
635/* This function is called from an interrupt handler */
636void
637nlge_msgring_handler(int bucket, int size, int code, int stid,
638		    struct msgrng_msg *msg, void *data)
639{
640	struct nlna_softc *na_sc;
641	struct nlge_softc *sc;
642	struct ifnet	*ifp;
643	struct mbuf	*m;
644	vm_paddr_t	phys_addr;
645	uint32_t	length;
646	int		ctrl;
647	int		tx_error;
648	int		port;
649	int		is_p2p;
650
651	is_p2p = 0;
652	tx_error = 0;
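	/*
	 * msg0 layout, as assumed by the field extraction below: bits
	 * [53:40] carry the packet length and the low 40 bits the buffer
	 * physical address. A length of zero marks a tx free-back message,
	 * which also carries the port (bits [57:54]), a tx error code
	 * (bits [61:58]) and a P2P-descriptor flag (bit 62). For rx packets
	 * the address is 32-byte aligned and the port id is in the low
	 * 4 bits.
	 */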
653	length = (msg->msg0 >> 40) & 0x3fff;
654	na_sc = (struct nlna_softc *)data;
655	if (length == 0) {
656		ctrl = CTRL_REG_FREE;
657		phys_addr = msg->msg0 & 0xffffffffffULL;
658		port = (msg->msg0 >> 54) & 0x0f;
659		is_p2p = (msg->msg0 >> 62) & 0x1;
660		tx_error = (msg->msg0 >> 58) & 0xf;
661	} else {
662		ctrl = CTRL_SNGL;
663		phys_addr = msg->msg0 & 0xffffffffe0ULL;
664		length = length - BYTE_OFFSET - MAC_CRC_LEN;
665		port = msg->msg0 & 0x0f;
666	}
667
668	sc = na_sc->child_sc[port];
669	if (sc == NULL) {
		printf("Message (len %d) with softc=NULL on port %d (type=%s)\n",
		    length, port, (ctrl == CTRL_SNGL ? "Pkt rx" :
		    "Freeback for tx packet"));
673		return;
674	}
675
676	if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
677		ifp = sc->nlge_if;
678		if (!tx_error) {
679			if (is_p2p) {
680				release_tx_desc(phys_addr);
681			} else {
682#ifdef __mips_n64
683				m = (struct mbuf *)(uintptr_t)xlr_paddr_ld(phys_addr);
684				m->m_nextpkt = NULL;
685#else
686				m = (struct mbuf *)(uintptr_t)phys_addr;
687#endif
688				m_freem(m);
689			}
690			NLGE_LOCK(sc);
691			if (ifp->if_drv_flags & IFF_DRV_OACTIVE){
692				ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
693			}
694			NLGE_UNLOCK(sc);
695		} else {
696			printf("ERROR: Tx fb error (%d) on port %d\n", tx_error,
697			    port);
698		}
699		atomic_incr_long((tx_error) ? &ifp->if_oerrors: &ifp->if_opackets);
700	} else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
701		/* Rx Packet */
702
703		nlge_rx(sc, phys_addr, length);
704		nlna_submit_rx_free_desc(na_sc, 1);	/* return free descr to NA */
705	} else {
706		printf("[%s]: unrecognized ctrl=%d!\n", __func__, ctrl);
707	}
708
709}
710
711static int
712nlge_tx(struct ifnet *ifp, struct mbuf *m)
713{
714	return (nlge_start_locked(ifp, ifp->if_softc, m));
715}
716
717static int
718nlge_start_locked(struct ifnet *ifp, struct nlge_softc *sc, struct mbuf *m)
719{
720	struct msgrng_msg 	msg;
721	struct nlge_tx_desc 	*tx_desc;
722	uint64_t		fr_stid;
723	uint32_t		cpu;
724	uint32_t		n_entries;
725	uint32_t		tid;
726	int 			error, ret;
727
728	if (m == NULL)
729		return (0);
730
731	tx_desc = NULL;
732	error = 0;
733	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) ||
734	    ifp->if_drv_flags & IFF_DRV_OACTIVE) {
735	    	error = ENXIO;
		goto fail;	/* note: mbuf will get freed */
737	}
738
739	cpu = xlr_core_id();
740	tid = xlr_thr_id();
741	/* H/w threads [0, 2] --> bucket 6 and [1, 3] --> bucket 7 */
742	fr_stid = cpu * 8 + 6 + (tid % 2);
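	/*
	 * For example, h/w thread 1 on core 2 uses free-back bucket
	 * 2 * 8 + 6 + (1 % 2) = 23.
	 */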
743
744	/*
745	 * First, remove some freeback messages before transmitting
746	 * any new packets. However, cap the number of messages
747	 * drained to permit this thread to continue with its
748	 * transmission.
749	 *
750	 * Mask for buckets {6, 7} is 0xc0
751	 */
752	xlr_msgring_handler(0xc0, 4);
753
754	ret = prepare_fmn_message(sc, &msg, &n_entries, m, fr_stid, &tx_desc);
755	if (ret) {
756		error = (ret == 2) ? ENOBUFS : ENOTSUP;
757		goto fail;
758	}
759	ret = send_fmn_msg_tx(sc, &msg, n_entries);
760	if (ret != 0) {
761		error = EBUSY;
762		goto fail;
763	}
764
765	return (0);
766
767fail:
768	if (tx_desc != NULL) {
769		uma_zfree(nl_tx_desc_zone, tx_desc);
770	}
771	if (m != NULL) {
772		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
773			NLGE_LOCK(sc);
774			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
775			NLGE_UNLOCK(sc);
776		}
777		m_freem(m);
778		atomic_incr_long(&ifp->if_iqdrops);
779	}
780	return (error);
781}
782
783static void
784nlge_rx(struct nlge_softc *sc, vm_paddr_t paddr, int len)
785{
786	struct ifnet	*ifp;
787	struct mbuf	*m;
788	uint64_t	tm, mag;
789	uint32_t	sr;
790
791	sr = xlr_enable_kx();
792	tm = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
793	mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
794	xlr_restore_kx(sr);
795
796	m = (struct mbuf *)(intptr_t)tm;
797	if (mag != 0xf00bad) {
		/* somebody else's packet. Error - FIXME in initialization */
799		printf("cpu %d: *ERROR* Not my packet paddr %jx\n",
800		    xlr_core_id(), (uintmax_t)paddr);
801		return;
802	}
803
804	ifp = sc->nlge_if;
805
806#ifdef NLGE_HW_CHKSUM
807	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
808	if (m->m_data[10] & 0x2) {
809		m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
810		if (m->m_data[10] & 0x1) {
811			m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
812			    CSUM_PSEUDO_HDR);
813			m->m_pkthdr.csum_data = htons(0xffff);
814		}
815	}
816	m->m_data += NLGE_PREPAD_LEN;
817	len -= NLGE_PREPAD_LEN;
818#else
819	m->m_pkthdr.csum_flags = 0;
820#endif
821
822	/* align the data */
	m->m_data += BYTE_OFFSET;
824	m->m_pkthdr.len = m->m_len = len;
825	m->m_pkthdr.rcvif = ifp;
826
827	atomic_incr_long(&ifp->if_ipackets);
828	(*ifp->if_input)(ifp, m);
829}
830
831static int
832nlge_mii_write(struct device *dev, int phyaddr, int regidx, int regval)
833{
834	struct nlge_softc *sc;
835
836	sc = device_get_softc(dev);
837	if (sc->port_type != XLR_XGMII)
838		nlge_mii_write_internal(sc->mii_base, phyaddr, regidx, regval);
839
840	return (0);
841}
842
843static int
844nlge_mii_read(struct device *dev, int phyaddr, int regidx)
845{
846	struct nlge_softc *sc;
847	int val;
848
849	sc = device_get_softc(dev);
850	val = (sc->port_type == XLR_XGMII) ? (0xffff) :
851	    nlge_mii_read_internal(sc->mii_base, phyaddr, regidx);
852
853	return (val);
854}
855
856static void
857nlge_mac_mii_statchg(device_t dev)
858{
859}
860
861static int
862nlge_mediachange(struct ifnet *ifp)
863{
864	return 0;
865}
866
867static void
868nlge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
869{
870	struct nlge_softc *sc;
871	struct mii_data *md;
872
873	md = NULL;
874	sc = ifp->if_softc;
875	if (sc->mii_bus)
876		md = device_get_softc(sc->mii_bus);
877
878	ifmr->ifm_status = IFM_AVALID;
879	ifmr->ifm_active = IFM_ETHER;
880
881	if (sc->link == xlr_mac_link_down)
882		return;
883
884	if (md != NULL)
885		ifmr->ifm_active = md->mii_media.ifm_cur->ifm_media;
886	ifmr->ifm_status |= IFM_ACTIVE;
887}
888
889static struct nlna_softc *
890nlna_sc_init(device_t dev, struct xlr_gmac_block_t *blk)
891{
892	struct nlna_softc	*sc;
893
894	sc = device_get_softc(dev);
895	memset(sc, 0, sizeof(*sc));
896	sc->nlna_dev = dev;
897	sc->base = xlr_io_mmio(blk->baseaddr);
898	sc->rfrbucket = blk->station_rfr;
899	sc->station_id = blk->station_id;
900	sc->na_type = blk->type;
901	sc->mac_type = blk->mode;
902	sc->num_ports = blk->num_ports;
903
904	sc->mdio_set.port_vec 	= sc->mdio_sc;
905	sc->mdio_set.vec_sz   	= XLR_MAX_MACS;
906
907	return (sc);
908}
909
910/*
911 * Do:
912 *     - Initialize common GMAC registers (index range 0x100-0x3ff).
913 */
914static void
915nlna_hw_init(struct nlna_softc *sc)
916{
917
918	/*
919	 * Register message ring handler for the NA block, messages from
920	 * the GMAC will have source station id to the first bucket of the
921	 * NA FMN station, so register just that station id.
922	 */
923	if (register_msgring_handler(sc->station_id, sc->station_id + 1,
924	    nlge_msgring_handler, sc)) {
925		panic("Couldn't register msgring handler\n");
926	}
927	nlna_config_fifo_spill_area(sc);
928	nlna_config_pde(sc);
929	nlna_config_common(sc);
930	nlna_config_parser(sc);
931	nlna_config_classifier(sc);
932}
933
934/*
935 * Enable interrupts on all the ports controlled by this NA. For now, we
936 * only care about the MII interrupt and this has to be enabled only
937 * on the port id0.
938 *
939 * This function is not in-sync with the regular way of doing things - it
940 * executes only in the context of the last active network accelerator (and
941 * thereby has some ugly accesses in the device tree). Though inelegant, it
942 * is necessary to do it this way as the per-port interrupts can be
943 * setup/enabled only after all the network accelerators have been
944 * initialized.
945 */
946static void
947nlna_setup_intr(struct nlna_softc *sc)
948{
949	struct nlna_softc *na_sc[XLR_MAX_NLNA];
950	struct nlge_port_set *pset;
951	struct xlr_gmac_port *port_info;
952	device_t	iodi_dev;
953	int 		i, j;
954
955	if (!nlna_is_last_active_na(sc))
956		return ;
957
958	/* Collect all nlna softc pointers */
959	memset(na_sc, 0, sizeof(*na_sc) * XLR_MAX_NLNA);
960	iodi_dev = device_get_parent(sc->nlna_dev);
961	nlna_get_all_softc(iodi_dev, na_sc, XLR_MAX_NLNA);
962
963	/* Setup the MDIO interrupt lists. */
964	/*
965	 * MDIO interrupts are coarse - a single interrupt line provides
966	 * information about one of many possible ports. To figure out the
967	 * exact port on which action is to be taken, all of the ports
968	 * linked to an MDIO interrupt should be read. To enable this,
969	 * ports need to add themselves to port sets.
970	 */
971	for (i = 0; i < XLR_MAX_NLNA; i++) {
972		if (na_sc[i] == NULL)
973			continue;
974		for (j = 0; j < na_sc[i]->num_ports; j++) {
975			/* processing j-th port on i-th NA */
976			port_info = device_get_ivars(
977			    na_sc[i]->child_sc[j]->nlge_dev);
978			pset = &na_sc[port_info->mdint_id]->mdio_set;
979			nlna_add_to_port_set(pset, na_sc[i]->child_sc[j]);
980		}
981	}
982
983	/* Enable interrupts */
984	for (i = 0; i < XLR_MAX_NLNA; i++) {
985		if (na_sc[i] != NULL && na_sc[i]->na_type != XLR_XGMAC) {
986			nlna_enable_intr(na_sc[i]);
987		}
988	}
989}
990
991static void
992nlna_add_to_port_set(struct nlge_port_set *pset, struct nlge_softc *sc)
993{
994	int i;
995
996	/* step past the non-NULL elements */
997	for (i = 0; i < pset->vec_sz && pset->port_vec[i] != NULL; i++) ;
998	if (i < pset->vec_sz)
999		pset->port_vec[i] = sc;
1000	else
		printf("warning: internal error: out-of-bounds for MDIO array\n");
1002}
1003
1004static void
1005nlna_enable_intr(struct nlna_softc *sc)
1006{
1007	int i;
1008
1009	for (i = 0; i < sc->num_ports; i++) {
1010		if (sc->child_sc[i]->instance == 0)
1011			NLGE_WRITE(sc->child_sc[i]->base, R_INTMASK,
1012			    (1 << O_INTMASK__MDInt));
1013	}
1014}
1015
1016static void
1017nlna_disable_intr(struct nlna_softc *sc)
1018{
1019	int i;
1020
1021	for (i = 0; i < sc->num_ports; i++) {
1022		if (sc->child_sc[i]->instance == 0)
1023			NLGE_WRITE(sc->child_sc[i]->base, R_INTMASK, 0);
1024	}
1025}
1026
1027static int
1028nlna_is_last_active_na(struct nlna_softc *sc)
1029{
1030	int id;
1031
1032	id = device_get_unit(sc->nlna_dev);
1033	return (id == 2 || xlr_board_info.gmac_block[id + 1].enabled == 0);
1034}
1035
1036static void
1037nlna_submit_rx_free_desc(struct nlna_softc *sc, uint32_t n_desc)
1038{
1039	struct msgrng_msg msg;
1040	void           *ptr;
1041	uint32_t	msgrng_flags;
1042	int		i, n, stid, ret, code;
1043
1044	if (n_desc > 1) {
1045		PDEBUG("Sending %d free-in descriptors to station=%d\n", n_desc,
1046		    sc->rfrbucket);
1047	}
1048
1049	stid = sc->rfrbucket;
1050	code = (sc->na_type == XLR_XGMAC) ? MSGRNG_CODE_XGMAC : MSGRNG_CODE_MAC;
1051	memset(&msg, 0, sizeof(msg));
1052
1053	for (i = 0; i < n_desc; i++) {
1054		ptr = get_buf();
1055		if (!ptr) {
1056			ret = -ENOMEM;
1057			device_printf(sc->nlna_dev, "Cannot allocate mbuf\n");
1058			break;
1059		}
1060
1061		/* Send the free Rx desc to the MAC */
1062		msg.msg0 = vtophys(ptr) & 0xffffffffe0ULL;
1063		n = 0;
1064		do {
1065			msgrng_flags = msgrng_access_enable();
1066			ret = message_send(1, code, stid, &msg);
1067			msgrng_restore(msgrng_flags);
1068			KASSERT(n++ < 100000, ("Too many credit fails in rx path\n"));
1069		} while (ret != 0);
1070	}
1071}
1072
1073static __inline__ void *
1074nlna_config_spill(xlr_reg_t *base, int reg_start_0, int reg_start_1,
1075    int reg_size, int size)
1076{
1077	void	*spill;
1078	uint64_t phys_addr;
1079	uint32_t spill_size;
1080
1081	spill_size = size;
1082	spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
1083	    M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
1084	if (spill == NULL || ((vm_offset_t) spill & (XLR_CACHELINE_SIZE - 1))) {
1085		panic("Unable to allocate memory for spill area!\n");
1086	}
1087	phys_addr = vtophys(spill);
	PDEBUG("Allocated spill %d bytes at %jx\n", size, (uintmax_t)phys_addr);
1089	NLGE_WRITE(base, reg_start_0, (phys_addr >> 5) & 0xffffffff);
1090	NLGE_WRITE(base, reg_start_1, (phys_addr >> 37) & 0x07);
1091	NLGE_WRITE(base, reg_size, spill_size);
1092
1093	return (spill);
1094}
1095
/*
 * Configure the 6 FIFOs that are used by the network accelerator to
 * communicate with the rest of the XLR/XLS device. 4 of the FIFOs are for
 * packets from NA --> cpu (called Class FIFOs) and 2 are for feeding
 * the NA with free descriptors.
 */
1102static void
1103nlna_config_fifo_spill_area(struct nlna_softc *sc)
1104{
1105	sc->frin_spill = nlna_config_spill(sc->base,
1106				     	R_REG_FRIN_SPILL_MEM_START_0,
1107				     	R_REG_FRIN_SPILL_MEM_START_1,
1108				     	R_REG_FRIN_SPILL_MEM_SIZE,
1109				     	MAX_FRIN_SPILL *
1110				     	sizeof(struct fr_desc));
1111	sc->frout_spill = nlna_config_spill(sc->base,
1112				     	R_FROUT_SPILL_MEM_START_0,
1113				     	R_FROUT_SPILL_MEM_START_1,
1114				     	R_FROUT_SPILL_MEM_SIZE,
1115				     	MAX_FROUT_SPILL *
1116				     	sizeof(struct fr_desc));
1117	sc->class_0_spill = nlna_config_spill(sc->base,
1118				     	R_CLASS0_SPILL_MEM_START_0,
1119				     	R_CLASS0_SPILL_MEM_START_1,
1120				     	R_CLASS0_SPILL_MEM_SIZE,
1121				     	MAX_CLASS_0_SPILL *
1122				     	sizeof(union rx_tx_desc));
1123	sc->class_1_spill = nlna_config_spill(sc->base,
1124				     	R_CLASS1_SPILL_MEM_START_0,
1125				     	R_CLASS1_SPILL_MEM_START_1,
1126				     	R_CLASS1_SPILL_MEM_SIZE,
1127				     	MAX_CLASS_1_SPILL *
1128				     	sizeof(union rx_tx_desc));
1129	sc->class_2_spill = nlna_config_spill(sc->base,
1130				     	R_CLASS2_SPILL_MEM_START_0,
1131				     	R_CLASS2_SPILL_MEM_START_1,
1132				     	R_CLASS2_SPILL_MEM_SIZE,
1133				     	MAX_CLASS_2_SPILL *
1134				     	sizeof(union rx_tx_desc));
1135	sc->class_3_spill = nlna_config_spill(sc->base,
1136				     	R_CLASS3_SPILL_MEM_START_0,
1137				     	R_CLASS3_SPILL_MEM_START_1,
1138				     	R_CLASS3_SPILL_MEM_SIZE,
1139				     	MAX_CLASS_3_SPILL *
1140				     	sizeof(union rx_tx_desc));
1141}
1142
1143/* Set the CPU buckets that receive packets from the NA class FIFOs. */
1144static void
1145nlna_config_pde(struct nlna_softc *sc)
1146{
1147	uint64_t	bucket_map;
1148	uint32_t	cpumask;
1149	int		i, cpu, bucket;
1150
1151	cpumask = 0x1;
1152#ifdef SMP
	/*
	 * nlge may be called before SMP start in a BOOTP/NFSROOT
	 * setup. We will distribute packets to other cpus only once
	 * SMP has started.
	 */
1158	if (smp_started)
1159		cpumask = xlr_hw_thread_mask;
1160#endif
1161	bucket_map = 0;
1162	for (i = 0; i < 32; i++) {
1163		if (cpumask & (1 << i)) {
1164			cpu = i;
1165			/* use bucket 0 and 1 on every core for NA msgs */
1166			bucket = cpu/4 * 8;
1167			bucket_map |= (3ULL << bucket);
1168		}
1169	}
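
	/*
	 * For example, with all eight h/w threads of cores 0 and 1 online
	 * (cpumask 0xff), the loop above selects buckets 0 and 1 of each
	 * core, i.e. bucket_map = 0x303.
	 */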
1170
1171	NLGE_WRITE(sc->base, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
1172	NLGE_WRITE(sc->base, R_PDE_CLASS_0 + 1, ((bucket_map >> 32) & 0xffffffff));
1173
1174	NLGE_WRITE(sc->base, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
1175	NLGE_WRITE(sc->base, R_PDE_CLASS_1 + 1, ((bucket_map >> 32) & 0xffffffff));
1176
1177	NLGE_WRITE(sc->base, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
1178	NLGE_WRITE(sc->base, R_PDE_CLASS_2 + 1, ((bucket_map >> 32) & 0xffffffff));
1179
1180	NLGE_WRITE(sc->base, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
1181	NLGE_WRITE(sc->base, R_PDE_CLASS_3 + 1, ((bucket_map >> 32) & 0xffffffff));
1182}
1183
1184/*
1185 * Update the network accelerator packet distribution engine for SMP.
1186 * On bootup, we have just the boot hw thread handling all packets, on SMP
1187 * start, we can start distributing packets across all the cores which are up.
1188 */
1189static void
1190nlna_smp_update_pde(void *dummy __unused)
1191{
1192	device_t	   iodi_dev;
1193	struct nlna_softc *na_sc[XLR_MAX_NLNA];
1194	int i;
1195
1196	printf("Updating packet distribution for SMP\n");
1197
1198	iodi_dev = devclass_get_device(devclass_find("iodi"), 0);
1199	nlna_get_all_softc(iodi_dev, na_sc, XLR_MAX_NLNA);
1200
1201	for (i = 0; i < XLR_MAX_NLNA; i++) {
1202		if (na_sc[i] == NULL)
1203			continue;
1204		nlna_disable_ports(na_sc[i]);
1205		nlna_config_pde(na_sc[i]);
1206		nlna_config_translate_table(na_sc[i]);
1207		nlna_enable_ports(na_sc[i]);
1208	}
1209}
1210
1211SYSINIT(nlna_smp_update_pde, SI_SUB_SMP, SI_ORDER_ANY, nlna_smp_update_pde,
1212    NULL);
1213
1214static void
1215nlna_config_translate_table(struct nlna_softc *sc)
1216{
1217	uint32_t cpu_mask;
1218	uint32_t val;
1219	int bkts[32]; /* one bucket is assumed for each cpu */
1220	int b1, b2, c1, c2, i, j, k;
1221	int use_bkt;
1222
1223	if (!flow_classification)
1224		return;
1225
1226	use_bkt = 1;
1227	if (smp_started)
1228		cpu_mask = xlr_hw_thread_mask;
1229	else
1230		return;
1231
1232	printf("Using %s-based distribution\n", (use_bkt) ? "bucket" : "class");
1233
1234	j = 0;
1235	for(i = 0; i < 32; i++) {
1236		if ((1 << i) & cpu_mask){
1237		/* for each cpu, mark the 4+threadid bucket */
1238			bkts[j] = ((i / 4) * 8) + (i % 4);
1239			j++;
1240		}
1241	}
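
	/*
	 * For example, cpu_mask 0xff makes the loop above produce
	 * bkts[] = { 0, 1, 2, 3, 8, 9, 10, 11 } with j = 8.
	 */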
1242
	/* Configure the 128 * 9 translation table to send to the available buckets */
1244	k = 0;
1245	c1 = 3;
1246	c2 = 0;
1247	for(i = 0; i < 64; i++) {
1248		/* Get the next 2 pairs of (class, bucket):
1249		   (c1, b1), (c2, b2).
1250
1251		   c1, c2 limited to {0, 1, 2, 3}
1252		       i.e, the 4 classes defined by h/w
1253		   b1, b2 limited to { bkts[i], where 0 <= i < j}
1254		       i.e, the set of buckets computed in the
1255		       above loop.
1256		*/
1257
1258		c1 = (c1 + 1) & 3;
1259		c2 = (c1 + 1) & 3;
1260		b1 = bkts[k];
1261		k = (k + 1) % j;
1262		b2 = bkts[k];
1263		k = (k + 1) % j;
1264		PDEBUG("Translation table[%d] b1=%d b2=%d c1=%d c2=%d\n",
1265		    i, b1, b2, c1, c2);
1266		val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) |
1267		    (c2 << 7) | (b2 << 1) | (use_bkt << 0));
1268		NLGE_WRITE(sc->base, R_TRANSLATETABLE + i, val);
1269		c1 = c2;
1270	}
1271}
1272
1273static void
1274nlna_config_parser(struct nlna_softc *sc)
1275{
1276	uint32_t val;
1277
1278	/*
1279	 * Mark it as ETHERNET type.
1280	 */
1281	NLGE_WRITE(sc->base, R_L2TYPE_0, 0x01);
1282
1283#ifndef NLGE_HW_CHKSUM
1284	if (!flow_classification)
1285		return;
1286#endif
1287
	/* Use a 7-bit CRC hash for flow classification, with 127 as the CRC polynomial */
1289	NLGE_WRITE(sc->base, R_PARSERCONFIGREG, ((0x7f << 8) | (1 << 1)));
1290
1291	/* configure the parser : L2 Type is configured in the bootloader */
1292	/* extract IP: src, dest protocol */
1293	NLGE_WRITE(sc->base, R_L3CTABLE,
1294	    (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
1295	    (0x0800 << 0));
1296	NLGE_WRITE(sc->base, R_L3CTABLE + 1,
1297	    (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) | (16 << 4) | 4);
1298#ifdef NLGE_HW_CHKSUM
1299	device_printf(sc->nlna_dev, "Enabled h/w support to compute TCP/IP"
1300	    " checksum\n");
1301#endif
1302
1303	/* Configure to extract SRC port and Dest port for TCP and UDP pkts */
1304	NLGE_WRITE(sc->base, R_L4CTABLE, 6);
1305	NLGE_WRITE(sc->base, R_L4CTABLE + 2, 17);
1306	val = ((0 << 21) | (2 << 17) | (2 << 11) | (2 << 7));
1307	NLGE_WRITE(sc->base, R_L4CTABLE + 1, val);
1308	NLGE_WRITE(sc->base, R_L4CTABLE + 3, val);
1309}
1310
1311static void
1312nlna_config_classifier(struct nlna_softc *sc)
1313{
1314	int i;
1315
1316	if (sc->mac_type == XLR_XGMII) {	/* TBD: XGMII init sequence */
1317		/* xgmac translation table doesn't have sane values on reset */
1318		for (i = 0; i < 64; i++)
1319			NLGE_WRITE(sc->base, R_TRANSLATETABLE + i, 0x0);
1320
1321		/*
1322		 * use upper 7 bits of the parser extract to index the
1323		 * translate table
1324		 */
1325		NLGE_WRITE(sc->base, R_PARSERCONFIGREG, 0x0);
1326	}
1327}
1328
1329/*
1330 * Complete a bunch of h/w register initializations that are common for all the
1331 * ports controlled by a NA.
1332 */
1333static void
1334nlna_config_common(struct nlna_softc *sc)
1335{
1336	struct xlr_gmac_block_t *block_info;
1337	struct stn_cc 		*gmac_cc_config;
1338	int			i;
1339
1340	block_info = device_get_ivars(sc->nlna_dev);
1341	gmac_cc_config = block_info->credit_config;
1342	for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1343		NLGE_WRITE(sc->base, R_CC_CPU0_0 + i,
1344		    gmac_cc_config->counters[i >> 3][i & 0x07]);
1345	}
1346
1347	NLGE_WRITE(sc->base, R_MSG_TX_THRESHOLD, 3);
1348
1349	NLGE_WRITE(sc->base, R_DMACR0, 0xffffffff);
1350	NLGE_WRITE(sc->base, R_DMACR1, 0xffffffff);
1351	NLGE_WRITE(sc->base, R_DMACR2, 0xffffffff);
1352	NLGE_WRITE(sc->base, R_DMACR3, 0xffffffff);
1353	NLGE_WRITE(sc->base, R_FREEQCARVE, 0);
1354
1355	nlna_media_specific_config(sc);
1356}
1357
1358static void
1359nlna_media_specific_config(struct nlna_softc *sc)
1360{
1361	struct bucket_size *bucket_sizes;
1362
1363	bucket_sizes = xlr_board_info.bucket_sizes;
1364	switch (sc->mac_type) {
1365	case XLR_RGMII:
1366	case XLR_SGMII:
1367	case XLR_XAUI:
1368		NLGE_WRITE(sc->base, R_GMAC_JFR0_BUCKET_SIZE,
1369		    bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
1370		NLGE_WRITE(sc->base, R_GMAC_RFR0_BUCKET_SIZE,
1371		    bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
1372		NLGE_WRITE(sc->base, R_GMAC_JFR1_BUCKET_SIZE,
1373		    bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
1374		NLGE_WRITE(sc->base, R_GMAC_RFR1_BUCKET_SIZE,
1375		    bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);
1376
1377		if (sc->mac_type == XLR_XAUI) {
1378			NLGE_WRITE(sc->base, R_TXDATAFIFO0, (224 << 16));
1379		}
1380		break;
1381
1382	case XLR_XGMII:
1383		NLGE_WRITE(sc->base, R_XGS_RFR_BUCKET_SIZE,
		    bucket_sizes->bucket[sc->rfrbucket]);
		break;

1386	default:
1387		break;
1388	}
1389}
1390
1391static void
1392nlna_reset_ports(struct nlna_softc *sc, struct xlr_gmac_block_t *blk)
1393{
1394	xlr_reg_t *addr;
1395	int i;
1396	uint32_t   rx_ctrl;
1397
	/* Refer to Section 13.9.3 of the PRM for the reset sequence */
1399
1400	for (i = 0; i < sc->num_ports; i++) {
1401		addr = xlr_io_mmio(blk->gmac_port[i].base_addr);
1402
1403		/* 1. Reset RxEnable in MAC_CONFIG */
1404		switch (sc->mac_type) {
1405		case XLR_RGMII:
1406		case XLR_SGMII:
1407			NLGE_UPDATE(addr, R_MAC_CONFIG_1, 0,
1408			    (1 << O_MAC_CONFIG_1__rxen));
1409			break;
1410		case XLR_XAUI:
1411		case XLR_XGMII:
1412			NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1413			   (1 << O_RX_CONTROL__RxEnable));
1414			break;
1415		default:
1416			printf("Error: Unsupported port_type=%d\n",
1417			    sc->mac_type);
1418		}
1419
1420		/* 1.1 Wait for RxControl.RxHalt to be set */
1421		do {
1422			rx_ctrl = NLGE_READ(addr, R_RX_CONTROL);
1423		} while (!(rx_ctrl & 0x2));
1424
1425		/* 2. Set the soft reset bit in RxControl */
1426		NLGE_UPDATE(addr, R_RX_CONTROL, (1 << O_RX_CONTROL__SoftReset),
1427		    (1 << O_RX_CONTROL__SoftReset));
1428
1429		/* 2.1 Wait for RxControl.SoftResetDone to be set */
1430		do {
1431			rx_ctrl = NLGE_READ(addr, R_RX_CONTROL);
1432		} while (!(rx_ctrl & 0x8));
1433
1434		/* 3. Clear the soft reset bit in RxControl */
1435		NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1436		    (1 << O_RX_CONTROL__SoftReset));
1437
1438		/* Turn off tx/rx on the port. */
1439		NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1440		    (1 << O_RX_CONTROL__RxEnable));
1441		NLGE_UPDATE(addr, R_TX_CONTROL, 0,
1442		    (1 << O_TX_CONTROL__TxEnable));
1443	}
1444}
1445
1446static void
1447nlna_disable_ports(struct nlna_softc *sc)
1448{
1449	int i;
1450
1451	for (i = 0; i < sc->num_ports; i++) {
1452		if (sc->child_sc[i] != NULL)
1453			nlge_port_disable(sc->child_sc[i]);
1454	}
1455}
1456
1457static void
1458nlna_enable_ports(struct nlna_softc *sc)
1459{
1460	device_t		nlge_dev, *devlist;
1461	struct nlge_softc 	*port_sc;
1462	int 			i, numdevs;
1463
1464	device_get_children(sc->nlna_dev, &devlist, &numdevs);
1465	for (i = 0; i < numdevs; i++) {
1466		nlge_dev = devlist[i];
1467		if (nlge_dev == NULL)
1468			continue;
1469		port_sc = device_get_softc(nlge_dev);
1470		if (port_sc->nlge_if->if_drv_flags & IFF_DRV_RUNNING)
1471			nlge_port_enable(port_sc);
1472	}
1473	free(devlist, M_TEMP);
1474}
1475
1476static void
1477nlna_get_all_softc(device_t iodi_dev, struct nlna_softc **sc_vec,
1478		   uint32_t vec_sz)
1479{
1480	device_t  na_dev;
1481	int       i;
1482
1483	for (i = 0; i < vec_sz; i++) {
1484		sc_vec[i] = NULL;
1485		na_dev = device_find_child(iodi_dev, "nlna", i);
1486		if (na_dev != NULL)
1487			sc_vec[i] = device_get_softc(na_dev);
1488	}
1489}
1490
1491static void
1492nlge_port_disable(struct nlge_softc *sc)
1493{
1494	struct ifnet *ifp;
1495	xlr_reg_t *base;
1496	uint32_t rd;
1497	int id, port_type;
1498
1499	id = sc->id;
1500	port_type = sc->port_type;
1501	base = sc->base;
1502	ifp = sc->nlge_if;
1503
1504	NLGE_UPDATE(base, R_RX_CONTROL, 0x0, 1 << O_RX_CONTROL__RxEnable);
1505	do {
1506		rd = NLGE_READ(base, R_RX_CONTROL);
1507	} while (!(rd & (1 << O_RX_CONTROL__RxHalt)));
1508
1509	NLGE_UPDATE(base, R_TX_CONTROL, 0, 1 << O_TX_CONTROL__TxEnable);
1510	do {
1511		rd = NLGE_READ(base, R_TX_CONTROL);
1512	} while (!(rd & (1 << O_TX_CONTROL__TxIdle)));
1513
1514	switch (port_type) {
1515	case XLR_RGMII:
1516	case XLR_SGMII:
1517		NLGE_UPDATE(base, R_MAC_CONFIG_1, 0,
1518		   ((1 << O_MAC_CONFIG_1__rxen) |
1519		   (1 << O_MAC_CONFIG_1__txen)));
1520		break;
1521	case XLR_XGMII:
1522	case XLR_XAUI:
1523		NLGE_UPDATE(base, R_XGMAC_CONFIG_1, 0,
1524		   ((1 << O_XGMAC_CONFIG_1__hsttfen) |
1525		   (1 << O_XGMAC_CONFIG_1__hstrfen)));
1526		break;
1527	default:
1528		panic("Unknown MAC type on port %d\n", id);
1529	}
1530
1531	if (ifp) {
1532		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1533	}
1534}
1535
1536static void
1537nlge_port_enable(struct nlge_softc *sc)
1538{
1539	struct xlr_gmac_port  *self;
1540	xlr_reg_t *base;
1541
1542	base = sc->base;
1543	self = device_get_ivars(sc->nlge_dev);
1544	if (xlr_board_info.is_xls && sc->port_type == XLR_RGMII)
1545		NLGE_UPDATE(base, R_RX_CONTROL, (1 << O_RX_CONTROL__RGMII),
1546	    	    (1 << O_RX_CONTROL__RGMII));
1547
1548	NLGE_UPDATE(base, R_RX_CONTROL, (1 << O_RX_CONTROL__RxEnable),
1549	    (1 << O_RX_CONTROL__RxEnable));
1550	NLGE_UPDATE(base, R_TX_CONTROL,
1551	    (1 << O_TX_CONTROL__TxEnable | RGE_TX_THRESHOLD_BYTES),
1552	    (1 << O_TX_CONTROL__TxEnable | 0x3fff));
1553	switch (sc->port_type) {
1554	case XLR_RGMII:
1555	case XLR_SGMII:
1556		NLGE_UPDATE(base, R_MAC_CONFIG_1,
1557		    ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen)),
1558		    ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen)));
1559		break;
1560	case XLR_XGMII:
1561	case XLR_XAUI:
1562		NLGE_UPDATE(base, R_XGMAC_CONFIG_1,
1563		    ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen)),
1564		    ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen)));
1565		break;
1566	default:
1567		panic("Unknown MAC type on port %d\n", sc->id);
1568	}
1569}
1570
1571static void
1572nlge_mac_set_rx_mode(struct nlge_softc *sc)
1573{
1574	uint32_t regval;
1575
1576	regval = NLGE_READ(sc->base, R_MAC_FILTER_CONFIG);
1577
1578	if (sc->if_flags & IFF_PROMISC) {
1579		regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1580		    (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1581		    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1582		    (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
1583	} else {
1584		regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1585		    (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
1586	}
1587
1588	NLGE_WRITE(sc->base, R_MAC_FILTER_CONFIG, regval);
1589}
1590
1591static void
1592nlge_sgmii_init(struct nlge_softc *sc)
1593{
1594	xlr_reg_t *mmio_gpio;
1595	int phy;
1596
1597	if (sc->port_type != XLR_SGMII)
1598		return;
1599
1600	nlge_mii_write_internal(sc->serdes_addr, 26, 0, 0x6DB0);
1601	nlge_mii_write_internal(sc->serdes_addr, 26, 1, 0xFFFF);
1602	nlge_mii_write_internal(sc->serdes_addr, 26, 2, 0xB6D0);
1603	nlge_mii_write_internal(sc->serdes_addr, 26, 3, 0x00FF);
1604	nlge_mii_write_internal(sc->serdes_addr, 26, 4, 0x0000);
1605	nlge_mii_write_internal(sc->serdes_addr, 26, 5, 0x0000);
1606	nlge_mii_write_internal(sc->serdes_addr, 26, 6, 0x0005);
1607	nlge_mii_write_internal(sc->serdes_addr, 26, 7, 0x0001);
1608	nlge_mii_write_internal(sc->serdes_addr, 26, 8, 0x0000);
1609	nlge_mii_write_internal(sc->serdes_addr, 26, 9, 0x0000);
1610	nlge_mii_write_internal(sc->serdes_addr, 26,10, 0x0000);
1611
	/* program GPIO values for serdes init parameters */
1613	DELAY(100);
1614	mmio_gpio = xlr_io_mmio(XLR_IO_GPIO_OFFSET);
1615	xlr_write_reg(mmio_gpio, 0x20, 0x7e6802);
1616	xlr_write_reg(mmio_gpio, 0x10, 0x7104);
1617	DELAY(100);
1618
1619	/*
1620	 * This kludge is needed to setup serdes (?) clock correctly on some
1621	 * XLS boards
1622	 */
1623	if ((xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI ||
1624	    xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XII) &&
1625	    xlr_boot1_info.board_minor_version == 4) {
		/* use a 125 MHz ref clock instead of 156.25 MHz */
1627		DELAY(100);
1628		xlr_write_reg(mmio_gpio, 0x10, 0x7103);
1629		xlr_write_reg(mmio_gpio, 0x21, 0x7103);
1630		DELAY(100);
1631	}
1632
1633	/* enable autoneg - more magic */
1634	phy = sc->phy_addr % 4 + 27;
1635	nlge_mii_write_internal(sc->pcs_addr, phy, 0, 0x1000);
1636	DELAY(100000);
1637	nlge_mii_write_internal(sc->pcs_addr, phy, 0, 0x0200);
1638	DELAY(100000);
1639}
1640
1641static void
1642nlge_intr(void *arg)
1643{
1644	struct nlge_port_set    *pset;
1645	struct nlge_softc 	*sc;
1646	struct nlge_softc 	*port_sc;
1647	xlr_reg_t 		*base;
1648	uint32_t		intreg;
1649	uint32_t		intr_status;
1650	int 			i;
1651
1652	sc = arg;
1653	if (sc == NULL) {
1654		printf("warning: No port registered for interrupt\n");
1655		return;
1656	}
1657	base = sc->base;
1658
1659	intreg = NLGE_READ(base, R_INTREG);
1660	if (intreg & (1 << O_INTREG__MDInt)) {
1661		pset = sc->mdio_pset;
1662		if (pset == NULL) {
1663			printf("warning: No ports for MDIO interrupt\n");
1664			return;
1665		}
1666		for (i = 0; i < pset->vec_sz; i++) {
1667			port_sc = pset->port_vec[i];
1668
1669			if (port_sc == NULL)
1670				continue;
1671
1672			/* Ack phy interrupt - clear on read*/
1673			intr_status = nlge_mii_read_internal(port_sc->mii_base,
1674			    port_sc->phy_addr, 26);
1675			PDEBUG("Phy_%d: int_status=0x%08x\n", port_sc->phy_addr,
1676			    intr_status);
1677
1678			if (!(intr_status & 0x8000)) {
1679				/* no interrupt for this port */
1680				continue;
1681			}
1682
1683			if (intr_status & 0x2410) {
1684				/* update link status for port */
1685				nlge_gmac_config_speed(port_sc, 1);
1686			} else {
1687				printf("%s: Unsupported phy interrupt"
1688				    " (0x%08x)\n",
1689				    device_get_nameunit(port_sc->nlge_dev),
1690				    intr_status);
1691			}
1692		}
1693	}
1694
1695	/* Clear the NA interrupt */
1696	xlr_write_reg(base, R_INTREG, 0xffffffff);
1697
1698	return;
1699}
1700
1701static int
1702nlge_irq_init(struct nlge_softc *sc)
1703{
1704	struct resource		irq_res;
1705	struct nlna_softc  	*na_sc;
1706	struct xlr_gmac_block_t *block_info;
1707	device_t		na_dev;
1708	int			ret;
1709	int			irq_num;
1710
1711	na_dev = device_get_parent(sc->nlge_dev);
1712	block_info = device_get_ivars(na_dev);
1713
1714	irq_num = block_info->baseirq + sc->instance;
1715	irq_res.__r_i = (struct resource_i *)(intptr_t) (irq_num);
1716	ret = bus_setup_intr(sc->nlge_dev, &irq_res,
1717	    INTR_TYPE_NET | INTR_MPSAFE, NULL, nlge_intr, sc, NULL);
1718	if (ret) {
1719		nlge_detach(sc->nlge_dev);
1720		device_printf(sc->nlge_dev, "couldn't set up irq: error=%d\n",
1721		    ret);
1722		return (ENXIO);
1723	}
1724	PDEBUG("Setup intr for dev=%s, irq=%d\n",
1725	    device_get_nameunit(sc->nlge_dev), irq_num);
1726
1727	if (sc->instance == 0) {
1728		na_sc = device_get_softc(na_dev);
1729		sc->mdio_pset = &na_sc->mdio_set;
1730	}
1731	return (0);
1732}
1733
1734static void
1735nlge_irq_fini(struct nlge_softc *sc)
1736{
1737}
1738
1739static void
1740nlge_hw_init(struct nlge_softc *sc)
1741{
1742	struct xlr_gmac_port  *port_info;
1743	xlr_reg_t *base;
1744
1745	base = sc->base;
1746	port_info = device_get_ivars(sc->nlge_dev);
1747	sc->tx_bucket_id = port_info->tx_bucket_id;
1748
1749	/* each packet buffer is 1536 bytes */
1750	NLGE_WRITE(base, R_DESC_PACK_CTRL,
1751	    (1 << O_DESC_PACK_CTRL__MaxEntry) |
1752#ifdef NLGE_HW_CHKSUM
1753	    (1 << O_DESC_PACK_CTRL__PrePadEnable) |
1754#endif
1755	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));
1756	NLGE_WRITE(base, R_STATCTRL, ((1 << O_STATCTRL__Sten) |
1757	    (1 << O_STATCTRL__ClrCnt)));
1758	NLGE_WRITE(base, R_L2ALLOCCTRL, 0xffffffff);
1759	NLGE_WRITE(base, R_INTMASK, 0);
1760	nlge_set_mac_addr(sc);
1761	nlge_media_specific_init(sc);
1762}
1763
1764static void
1765nlge_sc_init(struct nlge_softc *sc, device_t dev,
1766    struct xlr_gmac_port *port_info)
1767{
1768	memset(sc, 0, sizeof(*sc));
1769	sc->nlge_dev = dev;
1770	sc->id = device_get_unit(dev);
1771	nlge_set_port_attribs(sc, port_info);
1772}
1773
1774static void
1775nlge_media_specific_init(struct nlge_softc *sc)
1776{
1777	struct mii_data *media;
1778	struct bucket_size *bucket_sizes;
1779
1780	bucket_sizes = xlr_board_info.bucket_sizes;
1781	switch (sc->port_type) {
1782	case XLR_RGMII:
1783	case XLR_SGMII:
1784	case XLR_XAUI:
1785		NLGE_UPDATE(sc->base, R_DESC_PACK_CTRL,
1786		    (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset),
1787		    (W_DESC_PACK_CTRL__ByteOffset <<
1788		        O_DESC_PACK_CTRL__ByteOffset));
1789		NLGE_WRITE(sc->base, R_GMAC_TX0_BUCKET_SIZE + sc->instance,
1790		    bucket_sizes->bucket[sc->tx_bucket_id]);
1791		if (sc->port_type != XLR_XAUI) {
1792			nlge_gmac_config_speed(sc, 1);
1793			if (sc->mii_bus) {
1794				media = (struct mii_data *)device_get_softc(
1795				    sc->mii_bus);
1796			}
1797		}
1798		break;
1799
1800	case XLR_XGMII:
1801		NLGE_WRITE(sc->base, R_BYTEOFFSET0, 0x2);
1802		NLGE_WRITE(sc->base, R_XGMACPADCALIBRATION, 0x30);
1803		NLGE_WRITE(sc->base, R_XGS_TX0_BUCKET_SIZE,
1804		    bucket_sizes->bucket[sc->tx_bucket_id]);
1805		break;
1806	default:
1807		break;
1808	}
1809}
1810
1811/*
1812 * Read the MAC address from the XLR boot registers. All port addresses
1813 * are identical except for the lowest octet.
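 * For example (hypothetical address): if the boot loader reports
 * 00:11:22:33:44:50, the port attached as nlge2 is assigned
 * 00:11:22:33:44:52.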
1814 */
1815static void
1816nlge_read_mac_addr(struct nlge_softc *sc)
1817{
1818	int i, j;
1819
	for (i = 0, j = 40; i < ETHER_ADDR_LEN && j >= 0; i++, j -= 8)
1821		sc->dev_addr[i] = (xlr_boot1_info.mac_addr >> j) & 0xff;
1822
	sc->dev_addr[i - 1] += sc->id;	/* last octet is port-specific */
1824}
1825
1826/*
1827 * Write the MAC address to the XLR MAC port. Also, set the address
1828 * masks and MAC filter configuration.
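 * The address is written across two 32-bit registers: R_MAC_ADDR0 takes
 * bytes 5..2 and R_MAC_ADDR0 + 1 takes bytes 1 and 0 in its upper half
 * (see the shifts below).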
1829 */
1830static void
1831nlge_set_mac_addr(struct nlge_softc *sc)
1832{
1833	NLGE_WRITE(sc->base, R_MAC_ADDR0,
1834		  ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16) |
1835		   (sc->dev_addr[3] << 8) | (sc->dev_addr[2])));
1836	NLGE_WRITE(sc->base, R_MAC_ADDR0 + 1,
		  ((sc->dev_addr[1] << 24) | (sc->dev_addr[0] << 16)));
1838
1839	NLGE_WRITE(sc->base, R_MAC_ADDR_MASK2, 0xffffffff);
1840	NLGE_WRITE(sc->base, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
1841	NLGE_WRITE(sc->base, R_MAC_ADDR_MASK3, 0xffffffff);
1842	NLGE_WRITE(sc->base, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
1843
1844	NLGE_WRITE(sc->base, R_MAC_FILTER_CONFIG,
1845		  (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1846		  (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1847		  (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));
1848
1849	if (sc->port_type == XLR_RGMII || sc->port_type == XLR_SGMII) {
1850		NLGE_UPDATE(sc->base, R_IPG_IFG, MAC_B2B_IPG, 0x7f);
1851	}
1852}
1853
1854static int
1855nlge_if_init(struct nlge_softc *sc)
1856{
1857	struct ifnet 	*ifp;
1858	device_t	dev;
1859	int error;
1860
1861	error = 0;
1862	dev = sc->nlge_dev;
1863	NLGE_LOCK_INIT(sc, device_get_nameunit(dev));
1864
1865	ifp = sc->nlge_if = if_alloc(IFT_ETHER);
1866	if (ifp == NULL) {
		device_printf(dev, "cannot if_alloc()\n");
1868		error = ENOSPC;
1869		goto fail;
1870	}
1871	ifp->if_softc = sc;
1872	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1873	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1874	ifp->if_capabilities = 0;
1875	ifp->if_capenable = ifp->if_capabilities;
1876	ifp->if_ioctl = nlge_ioctl;
1877	ifp->if_init = nlge_init;
1878	ifp->if_hwassist = 0;
1879	ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1880	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1881	IFQ_SET_READY(&ifp->if_snd);
1882
1883	ifmedia_init(&sc->nlge_mii.mii_media, 0, nlge_mediachange,
1884	    nlge_mediastatus);
1885	ifmedia_add(&sc->nlge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1886	ifmedia_set(&sc->nlge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1887	sc->nlge_mii.mii_media.ifm_media = sc->nlge_mii.mii_media.ifm_cur->ifm_media;
1888	nlge_read_mac_addr(sc);
1889
1890	ether_ifattach(ifp, sc->dev_addr);
1891
1892	/* override if_transmit : per ifnet(9), do it after if_attach */
1893	ifp->if_transmit = nlge_tx;
1894
1895fail:
1896	return (error);
1897}
1898
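/*
 * Program the MII management configuration register for this port (not
 * needed for XAUI/XGMII) and attach the PHY driver via mii_attach().  On
 * success, PHY interrupts are enabled by writing PHY register 25.
 */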
static void
nlge_mii_init(device_t dev, struct nlge_softc *sc)
{
	int error;

	if (sc->port_type != XLR_XAUI && sc->port_type != XLR_XGMII) {
		NLGE_WRITE(sc->mii_base, R_MII_MGMT_CONFIG, 0x07);
	}
	error = mii_attach(dev, &sc->mii_bus, sc->nlge_if, nlge_mediachange,
	    nlge_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr, MII_OFFSET_ANY,
	    0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		sc->mii_bus = NULL;
	}
	if (sc->mii_bus != NULL) {
		/*
		 * Enable all MDIO interrupts in the phy. RX_ER bit seems to
		 * get set about every 1 sec in GigE mode, ignore it for
		 * now...
		 */
		nlge_mii_write_internal(sc->mii_base, sc->phy_addr, 25,
		    0xfffffffe);
	}
}
/*
 *  Read a PHY register.
 *
 *  Input parameters:
 *  	   mii_base - Base address of MII
 *  	   phyaddr - PHY's address
 *  	   regidx - index of register to read
 *
 *  Return value:
 *  	   value read, or 0xffffffff if the read timed out.
 */
static int
nlge_mii_read_internal(xlr_reg_t *mii_base, int phyaddr, int regidx)
{
	int i, val;

	/* setup the phy reg to be used */
	NLGE_WRITE(mii_base, R_MII_MGMT_ADDRESS,
	    (phyaddr << 8) | (regidx << 0));
	/* Issue the read command */
	NLGE_WRITE(mii_base, R_MII_MGMT_COMMAND,
	    (1 << O_MII_MGMT_COMMAND__rstat));

	/* poll for the read cycle to complete */
	for (i = 0; i < PHY_STATUS_RETRIES; i++) {
		if (NLGE_READ(mii_base, R_MII_MGMT_INDICATORS) == 0)
			break;
	}

	/* clear the read cycle */
	NLGE_WRITE(mii_base, R_MII_MGMT_COMMAND, 0);

	if (i == PHY_STATUS_RETRIES) {
		return (0xffffffff);
	}

	val = NLGE_READ(mii_base, R_MII_MGMT_STATUS);

	return (val);
}

/*
 *  Write a value to a PHY register.
 *
 *  Input parameters:
 *  	   mii_base - Base address of MII
 *  	   phyaddr - PHY to use
 *  	   regidx - register within the PHY
 *  	   regval - data to write to register
 *
 *  Return value:
 *  	   nothing
 */
static void
nlge_mii_write_internal(xlr_reg_t *mii_base, int phyaddr, int regidx,
    int regval)
{
	int i;

	NLGE_WRITE(mii_base, R_MII_MGMT_ADDRESS,
	    (phyaddr << 8) | (regidx << 0));

	/* Write the data which starts the write cycle */
	NLGE_WRITE(mii_base, R_MII_MGMT_WRITE_DATA, regval);

	/* poll for the write cycle to complete */
	for (i = 0; i < PHY_STATUS_RETRIES; i++) {
		if (NLGE_READ(mii_base, R_MII_MGMT_INDICATORS) == 0)
			break;
	}
}

/*
 * Function to optimize the use of p2d descriptors for the given PDU.
 * As it is on the fast path (called during packet transmission), it is
 * described in more detail than the initialization functions.
 *
 * Input: mbuf chain (MC), pointer to fmn message
 * Input constraints: None
 * Output: FMN message to transmit the data in MC
 * Return values: 0 - success
 *                1 - MC cannot be handled (see Limitations below)
 *                2 - MC cannot be handled presently (maybe worth re-trying)
 * Other output: Number of entries filled in the FMN message
 *
 * Output structure/constraints:
 *     1. Max 3 p2d's + 1 zero-len (ZL) p2d with virtual address of MC.
 *     2. 3 p2d's + 1 p2p with max 14 p2d's (ZL p2d not required in this case).
 *     3. Each p2d points to a physically contiguous chunk of data (subject to
 *        the entire MC requiring max 17 p2d's).
 * Limitations:
 *     1. MC's that require more than 17 p2d's are not handled.
 * Benefits: MC's that require <= 3 p2d's avoid the overhead of allocating
 *           the p2p structure. Small packets (which typically give low
 *           performance) are expected to have a small MC that takes
 *           advantage of this.
 */
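/*
 * Concretely, a chain that fits in three p2d entries is sent as
 * { p2d, p2d, p2d, ZL-freeback } directly in the 4-entry FMN message; a
 * longer chain is sent as { p2d, p2d, p2d, p2p-pointer }, with the
 * remaining p2d entries and the freeback entry placed in the external p2p
 * descriptor allocated from nl_tx_desc_zone.
 */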
static int
prepare_fmn_message(struct nlge_softc *sc, struct msgrng_msg *fmn_msg,
    uint32_t *n_entries, struct mbuf *mbuf_chain, uint64_t fb_stn_id,
    struct nlge_tx_desc **tx_desc)
{
	struct mbuf     *m;
	struct nlge_tx_desc *p2p;
	uint64_t        *cur_p2d;
	uint64_t        fbpaddr;
	vm_offset_t	buf;
	vm_paddr_t      paddr;
	int             msg_sz, p2p_sz, len, frag_sz;
	/* Num entries per FMN msg is 4 for XLR/XLS */
	const int       FMN_SZ = sizeof(*fmn_msg) / sizeof(uint64_t);

	msg_sz = p2p_sz = 0;
	p2p = NULL;
	cur_p2d = &fmn_msg->msg0;

	for (m = mbuf_chain; m != NULL; m = m->m_next) {
		buf = (vm_offset_t) m->m_data;
		len = m->m_len;

		while (len) {
			if (msg_sz == (FMN_SZ - 1)) {
				p2p = uma_zalloc(nl_tx_desc_zone, M_NOWAIT);
				if (p2p == NULL) {
					return (2);
				}
				/*
				 * Save the virtual address in the descriptor,
				 * it makes freeing easy.
				 */
				p2p->frag[XLR_MAX_TX_FRAGS] =
				    (uint64_t)(vm_offset_t)p2p;
				cur_p2d = &p2p->frag[0];
			} else if (msg_sz == (FMN_SZ - 2 + XLR_MAX_TX_FRAGS)) {
				uma_zfree(nl_tx_desc_zone, p2p);
				return (1);
			}
			paddr = vtophys(buf);
			frag_sz = PAGE_SIZE - (buf & PAGE_MASK);
			if (len < frag_sz)
				frag_sz = len;
			*cur_p2d++ = (127ULL << 54) | ((uint64_t)frag_sz << 40)
			    | paddr;
			msg_sz++;
			if (p2p != NULL)
				p2p_sz++;
			len -= frag_sz;
			buf += frag_sz;
		}
	}

	if (msg_sz == 0) {
		printf("Zero-length mbuf chain ??\n");
		*n_entries = msg_sz;
		return (0);
	}

	/* set eop in most-recent p2d */
	cur_p2d[-1] |= (1ULL << 63);

#ifdef __mips_n64
	/*
	 * On n64, we cannot store our mbuf pointer (64 bit) in the freeback
	 * message (40 bits available), so we put the mbuf in m_nextpkt and
	 * use the physical addr of that in the freeback message.
	 */
	mbuf_chain->m_nextpkt = mbuf_chain;
	fbpaddr = vtophys(&mbuf_chain->m_nextpkt);
#else
	/* Careful, don't sign extend when going to 64 bit */
	fbpaddr = (uint64_t)(uintptr_t)mbuf_chain;
#endif
	*cur_p2d = (1ULL << 63) | ((uint64_t)fb_stn_id << 54) | fbpaddr;
	*tx_desc = p2p;

	if (p2p != NULL) {
		paddr = vtophys(p2p);
		p2p_sz++;
		fmn_msg->msg3 = (1ULL << 62) | ((uint64_t)fb_stn_id << 54) |
		    ((uint64_t)(p2p_sz * 8) << 40) | paddr;
		*n_entries = FMN_SZ;
	} else {
		*n_entries = msg_sz + 1;
	}

	return (0);
}

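/*
 * Push a prepared FMN message to the transmit bucket of this port, retrying
 * while the message station is out of credits.  Returns 0 on success and 1
 * if the send kept failing after many retries.
 */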
static int
send_fmn_msg_tx(struct nlge_softc *sc, struct msgrng_msg *msg,
    uint32_t n_entries)
{
	uint32_t msgrng_flags;
	int ret;
	int i = 0;

	do {
		msgrng_flags = msgrng_access_enable();
		ret = message_send(n_entries, MSGRNG_CODE_MAC,
		    sc->tx_bucket_id, msg);
		msgrng_restore(msgrng_flags);
		if (ret == 0)
			return (0);
		i++;
	} while (i < 100000);

	device_printf(sc->nlge_dev, "Too many credit fails in tx path\n");

	return (1);
}

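/*
 * Free the external p2p descriptor identified by a freeback message.  The
 * physical address points at the start of the descriptor; the virtual
 * address stored in its last slot (frag[XLR_MAX_TX_FRAGS]) is read back
 * (with KX enabled for 64-bit physical loads) and handed to uma_zfree().
 */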
static void
release_tx_desc(vm_paddr_t paddr)
{
	struct nlge_tx_desc *tx_desc;
	uint32_t	sr;
	uint64_t	vaddr;

	paddr += (XLR_MAX_TX_FRAGS * sizeof(uint64_t));
	sr = xlr_enable_kx();
	vaddr = xlr_paddr_ld(paddr);
	xlr_restore_kx(sr);

	tx_desc = (struct nlge_tx_desc *)(intptr_t)vaddr;
	uma_zfree(nl_tx_desc_zone, tx_desc);
}

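/*
 * Allocate a receive buffer: get an mbuf cluster, align its data area to the
 * XLR cache-line size, store a back-pointer to the mbuf (plus a marker word)
 * in the first cache line, and return the address just past that metadata.
 * The back-pointer lets the receive path recover the mbuf from the buffer
 * address later.
 */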
static void *
get_buf(void)
{
	struct mbuf	*m_new;
	uint64_t	*md;
#ifdef INVARIANTS
	vm_paddr_t	temp1, temp2;
#endif

	if ((m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL)
		return (NULL);
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	m_adj(m_new, XLR_CACHELINE_SIZE - ((uintptr_t)m_new->m_data & 0x1f));
	md = (uint64_t *)m_new->m_data;
	md[0] = (intptr_t)m_new;	/* Back Ptr */
	md[1] = 0xf00bad;
	m_adj(m_new, XLR_CACHELINE_SIZE);

#ifdef INVARIANTS
	temp1 = vtophys((vm_offset_t) m_new->m_data);
	temp2 = vtophys((vm_offset_t) m_new->m_data + 1536);
	if ((temp1 + 1536) != temp2)
		panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
#endif

	return ((void *)m_new->m_data);
}

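/*
 * Determine the current link state and speed of an RGMII/SGMII port and
 * program the MAC accordingly.  The PHY is polled (briefly when 'quick' is
 * set, longer otherwise) for autonegotiation completion and link, the speed
 * is read from PHY register 28, and R_MAC_CONFIG_2, R_CORECONTROL and (for
 * SGMII) R_INTERFACE_CONTROL are set to match.  The resulting link state is
 * then reported to the network stack.
 */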
static int
nlge_gmac_config_speed(struct nlge_softc *sc, int quick)
{
	struct mii_data *md;
	xlr_reg_t  *mmio;
	int bmsr, n_tries, max_tries;
	int core_ctl[]    = { 0x2, 0x1, 0x0, 0x1 };
	int sgmii_speed[] = { SGMII_SPEED_10,
			      SGMII_SPEED_100,
			      SGMII_SPEED_1000,
			      SGMII_SPEED_100 };    /* default to 100Mbps */
	char *speed_str[] = { "10",
			      "100",
			      "1000",
			      "unknown, defaulting to 100" };
	int link_state = LINK_STATE_DOWN;

	if (sc->port_type == XLR_XAUI || sc->port_type == XLR_XGMII)
		return (0);

	md = NULL;
	mmio = sc->base;
	if (sc->mii_base != NULL) {
		max_tries = (quick == 1) ? 100 : 4000;
		bmsr = 0;
		for (n_tries = 0; n_tries < max_tries; n_tries++) {
			bmsr = nlge_mii_read_internal(sc->mii_base,
			    sc->phy_addr, MII_BMSR);
			if ((bmsr & BMSR_ACOMP) && (bmsr & BMSR_LINK))
				break; /* Auto-negotiation is complete
					  and link is up */
			DELAY(1000);
		}
		bmsr &= BMSR_LINK;
		sc->link = (bmsr == 0) ? xlr_mac_link_down : xlr_mac_link_up;
		sc->speed = nlge_mii_read_internal(sc->mii_base, sc->phy_addr,
		    28);
		sc->speed = (sc->speed >> 3) & 0x03;
		if (sc->link == xlr_mac_link_up) {
			link_state = LINK_STATE_UP;
			nlge_sgmii_init(sc);
		}
		if (sc->mii_bus)
			md = (struct mii_data *)device_get_softc(sc->mii_bus);
	}

	if (sc->port_type != XLR_RGMII)
		NLGE_WRITE(mmio, R_INTERFACE_CONTROL, sgmii_speed[sc->speed]);
	if (sc->speed == xlr_mac_speed_10 || sc->speed == xlr_mac_speed_100 ||
	    sc->speed == xlr_mac_speed_rsvd) {
		NLGE_WRITE(mmio, R_MAC_CONFIG_2, 0x7117);
	} else if (sc->speed == xlr_mac_speed_1000) {
		NLGE_WRITE(mmio, R_MAC_CONFIG_2, 0x7217);
		if (md != NULL) {
			ifmedia_set(&md->mii_media, IFM_MAKEWORD(IFM_ETHER,
			    IFM_1000_T, IFM_FDX, md->mii_instance));
		}
	}
	NLGE_WRITE(mmio, R_CORECONTROL, core_ctl[sc->speed]);
	if_link_state_change(sc->nlge_if, link_state);
	printf("%s: [%sMbps]\n", device_get_nameunit(sc->nlge_dev),
	    speed_str[sc->speed]);

	return (0);
}

2244
2245/*
2246 * This function is called for each port that was added to the device tree
2247 * and it initializes the following port attributes:
2248 * 	- type
2249 *      - base (base address to access port-specific registers)
2250 *      - mii_base
2251 * 	- phy_addr
2252 */
2253static void
2254nlge_set_port_attribs(struct nlge_softc *sc,
2255    struct xlr_gmac_port *port_info)
2256{
2257	sc->instance = port_info->instance % 4;	/* TBD: will not work for SPI-4 */
2258	sc->port_type = port_info->type;
2259	sc->base = xlr_io_mmio(port_info->base_addr);
2260	sc->mii_base = xlr_io_mmio(port_info->mii_addr);
2261	if (port_info->pcs_addr != 0)
2262		sc->pcs_addr = xlr_io_mmio(port_info->pcs_addr);
2263	if (port_info->serdes_addr != 0)
2264		sc->serdes_addr = xlr_io_mmio(port_info->serdes_addr);
2265	sc->phy_addr = port_info->phy_addr;
2266
2267	PDEBUG("Port%d: base=%p, mii_base=%p, phy_addr=%d\n", sc->id, sc->base,
2268	    sc->mii_base, sc->phy_addr);
2269}
2270
2271/* ------------------------------------------------------------------------ */
2272
2273/* Debug dump functions */
2274
2275#ifdef DEBUG
2276
2277static void
2278dump_reg(xlr_reg_t *base, uint32_t offset, char *name)
2279{
2280	int val;
2281
2282	val = NLGE_READ(base, offset);
2283	printf("%-30s: 0x%8x 0x%8x\n", name, offset, val);
2284}
2285
2286#define STRINGIFY(x) 		#x
2287
2288static void
2289dump_na_registers(xlr_reg_t *base_addr, int port_id)
2290{
2291	PDEBUG("Register dump for NA (of port=%d)\n", port_id);
2292	dump_reg(base_addr, R_PARSERCONFIGREG, STRINGIFY(R_PARSERCONFIGREG));
2293	PDEBUG("Tx bucket sizes\n");
2294	dump_reg(base_addr, R_GMAC_JFR0_BUCKET_SIZE,
2295	    STRINGIFY(R_GMAC_JFR0_BUCKET_SIZE));
2296	dump_reg(base_addr, R_GMAC_RFR0_BUCKET_SIZE,
2297	    STRINGIFY(R_GMAC_RFR0_BUCKET_SIZE));
2298	dump_reg(base_addr, R_GMAC_TX0_BUCKET_SIZE,
2299	    STRINGIFY(R_GMAC_TX0_BUCKET_SIZE));
2300	dump_reg(base_addr, R_GMAC_TX1_BUCKET_SIZE,
2301	    STRINGIFY(R_GMAC_TX1_BUCKET_SIZE));
2302	dump_reg(base_addr, R_GMAC_TX2_BUCKET_SIZE,
2303	    STRINGIFY(R_GMAC_TX2_BUCKET_SIZE));
2304	dump_reg(base_addr, R_GMAC_TX3_BUCKET_SIZE,
2305	    STRINGIFY(R_GMAC_TX3_BUCKET_SIZE));
2306	dump_reg(base_addr, R_GMAC_JFR1_BUCKET_SIZE,
2307	    STRINGIFY(R_GMAC_JFR1_BUCKET_SIZE));
2308	dump_reg(base_addr, R_GMAC_RFR1_BUCKET_SIZE,
2309	    STRINGIFY(R_GMAC_RFR1_BUCKET_SIZE));
2310	dump_reg(base_addr, R_TXDATAFIFO0, STRINGIFY(R_TXDATAFIFO0));
2311	dump_reg(base_addr, R_TXDATAFIFO1, STRINGIFY(R_TXDATAFIFO1));
2312}
static void
dump_gmac_registers(struct nlge_softc *sc)
{
	xlr_reg_t *base_addr = sc->base;
	int port_id = sc->instance;

	PDEBUG("Register dump for port=%d\n", port_id);
	if (sc->port_type == XLR_RGMII || sc->port_type == XLR_SGMII) {
		dump_reg(base_addr, R_MAC_CONFIG_1, STRINGIFY(R_MAC_CONFIG_1));
		dump_reg(base_addr, R_MAC_CONFIG_2, STRINGIFY(R_MAC_CONFIG_2));
		dump_reg(base_addr, R_IPG_IFG, STRINGIFY(R_IPG_IFG));
		dump_reg(base_addr, R_HALF_DUPLEX, STRINGIFY(R_HALF_DUPLEX));
		dump_reg(base_addr, R_MAXIMUM_FRAME_LENGTH,
		    STRINGIFY(R_MAXIMUM_FRAME_LENGTH));
		dump_reg(base_addr, R_TEST, STRINGIFY(R_TEST));
		dump_reg(base_addr, R_MII_MGMT_CONFIG,
		    STRINGIFY(R_MII_MGMT_CONFIG));
		dump_reg(base_addr, R_MII_MGMT_COMMAND,
		    STRINGIFY(R_MII_MGMT_COMMAND));
		dump_reg(base_addr, R_MII_MGMT_ADDRESS,
		    STRINGIFY(R_MII_MGMT_ADDRESS));
		dump_reg(base_addr, R_MII_MGMT_WRITE_DATA,
		    STRINGIFY(R_MII_MGMT_WRITE_DATA));
		dump_reg(base_addr, R_MII_MGMT_STATUS,
		    STRINGIFY(R_MII_MGMT_STATUS));
		dump_reg(base_addr, R_MII_MGMT_INDICATORS,
		    STRINGIFY(R_MII_MGMT_INDICATORS));
		dump_reg(base_addr, R_INTERFACE_CONTROL,
		    STRINGIFY(R_INTERFACE_CONTROL));
		dump_reg(base_addr, R_INTERFACE_STATUS,
		    STRINGIFY(R_INTERFACE_STATUS));
	} else if (sc->port_type == XLR_XAUI || sc->port_type == XLR_XGMII) {
		dump_reg(base_addr, R_XGMAC_CONFIG_0,
		    STRINGIFY(R_XGMAC_CONFIG_0));
		dump_reg(base_addr, R_XGMAC_CONFIG_1,
		    STRINGIFY(R_XGMAC_CONFIG_1));
		dump_reg(base_addr, R_XGMAC_CONFIG_2,
		    STRINGIFY(R_XGMAC_CONFIG_2));
		dump_reg(base_addr, R_XGMAC_CONFIG_3,
		    STRINGIFY(R_XGMAC_CONFIG_3));
		dump_reg(base_addr, R_XGMAC_STATION_ADDRESS_LS,
		    STRINGIFY(R_XGMAC_STATION_ADDRESS_LS));
		dump_reg(base_addr, R_XGMAC_STATION_ADDRESS_MS,
		    STRINGIFY(R_XGMAC_STATION_ADDRESS_MS));
		dump_reg(base_addr, R_XGMAC_MAX_FRAME_LEN,
		    STRINGIFY(R_XGMAC_MAX_FRAME_LEN));
		dump_reg(base_addr, R_XGMAC_REV_LEVEL,
		    STRINGIFY(R_XGMAC_REV_LEVEL));
		dump_reg(base_addr, R_XGMAC_MIIM_COMMAND,
		    STRINGIFY(R_XGMAC_MIIM_COMMAND));
		dump_reg(base_addr, R_XGMAC_MIIM_FILED,
		    STRINGIFY(R_XGMAC_MIIM_FILED));
		dump_reg(base_addr, R_XGMAC_MIIM_CONFIG,
		    STRINGIFY(R_XGMAC_MIIM_CONFIG));
		dump_reg(base_addr, R_XGMAC_MIIM_LINK_FAIL_VECTOR,
		    STRINGIFY(R_XGMAC_MIIM_LINK_FAIL_VECTOR));
		dump_reg(base_addr, R_XGMAC_MIIM_INDICATOR,
		    STRINGIFY(R_XGMAC_MIIM_INDICATOR));
	}

	dump_reg(base_addr, R_MAC_ADDR0, STRINGIFY(R_MAC_ADDR0));
	dump_reg(base_addr, R_MAC_ADDR0 + 1, STRINGIFY(R_MAC_ADDR0+1));
	dump_reg(base_addr, R_MAC_ADDR1, STRINGIFY(R_MAC_ADDR1));
	dump_reg(base_addr, R_MAC_ADDR2, STRINGIFY(R_MAC_ADDR2));
	dump_reg(base_addr, R_MAC_ADDR3, STRINGIFY(R_MAC_ADDR3));
	dump_reg(base_addr, R_MAC_ADDR_MASK2, STRINGIFY(R_MAC_ADDR_MASK2));
	dump_reg(base_addr, R_MAC_ADDR_MASK3, STRINGIFY(R_MAC_ADDR_MASK3));
	dump_reg(base_addr, R_MAC_FILTER_CONFIG, STRINGIFY(R_MAC_FILTER_CONFIG));
	dump_reg(base_addr, R_TX_CONTROL, STRINGIFY(R_TX_CONTROL));
	dump_reg(base_addr, R_RX_CONTROL, STRINGIFY(R_RX_CONTROL));
	dump_reg(base_addr, R_DESC_PACK_CTRL, STRINGIFY(R_DESC_PACK_CTRL));
	dump_reg(base_addr, R_STATCTRL, STRINGIFY(R_STATCTRL));
	dump_reg(base_addr, R_L2ALLOCCTRL, STRINGIFY(R_L2ALLOCCTRL));
	dump_reg(base_addr, R_INTMASK, STRINGIFY(R_INTMASK));
	dump_reg(base_addr, R_INTREG, STRINGIFY(R_INTREG));
	dump_reg(base_addr, R_TXRETRY, STRINGIFY(R_TXRETRY));
	dump_reg(base_addr, R_CORECONTROL, STRINGIFY(R_CORECONTROL));
	dump_reg(base_addr, R_BYTEOFFSET0, STRINGIFY(R_BYTEOFFSET0));
	dump_reg(base_addr, R_BYTEOFFSET1, STRINGIFY(R_BYTEOFFSET1));
	dump_reg(base_addr, R_L2TYPE_0, STRINGIFY(R_L2TYPE_0));
	dump_na_registers(base_addr, port_id);
}

static void
dump_fmn_cpu_credits_for_gmac(struct xlr_board_info *board, int gmac_id)
{
	struct stn_cc *cc;
	int gmac_bucket_ids[] = { 97, 98, 99, 100, 101, 103 };
	int j, k, r, c;
	int n_gmac_buckets;

	n_gmac_buckets = sizeof(gmac_bucket_ids) / sizeof(gmac_bucket_ids[0]);
	for (j = 0; j < 8; j++) {		/* for each cpu */
		cc = board->credit_configs[j];
		printf("Credits for Station CPU_%d ---> GMAC buckets (tx path)\n", j);
		for (k = 0; k < n_gmac_buckets; k++) {
			r = gmac_bucket_ids[k] / 8;
			c = gmac_bucket_ids[k] % 8;
			printf("    --> gmac%d_bucket_%-3d: credits=%d\n",
			    gmac_id, gmac_bucket_ids[k], cc->counters[r][c]);
		}
	}
}

static void
dump_fmn_gmac_credits(struct xlr_board_info *board, int gmac_id)
{
	struct stn_cc *cc;
	int j, k;

	cc = board->gmac_block[gmac_id].credit_config;
	printf("Credits for Station: GMAC_%d ---> CPU buckets (rx path)\n", gmac_id);
	for (j = 0; j < 8; j++) {		/* for each cpu */
		printf("    ---> cpu_%d\n", j);
		for (k = 0; k < 8; k++) {	/* for each bucket in cpu */
			printf("        ---> bucket_%d: credits=%d\n",
			    j * 8 + k, cc->counters[j][k]);
		}
	}
}

static void
dump_board_info(struct xlr_board_info *board)
{
	struct xlr_gmac_block_t *gm;
	int i, k;

	printf("cpu=%x ", xlr_revision());
	printf("board_version: major=%llx, minor=%llx\n",
	    xlr_boot1_info.board_major_version,
	    xlr_boot1_info.board_minor_version);
	printf("is_xls=%d, nr_cpus=%d, usb=%s, cfi=%s, ata=%s\npci_irq=%d,"
	    "gmac_ports=%d\n", board->is_xls, board->nr_cpus,
	    board->usb ? "Yes" : "No", board->cfi ? "Yes" : "No",
	    board->ata ? "Yes" : "No", board->pci_irq, board->gmacports);
	printf("FMN: Core-station bucket sizes\n");
	for (i = 0; i < 128; i++) {
		if (i && ((i % 16) == 0))
			printf("\n");
		printf("b[%d] = %d ", i, board->bucket_sizes->bucket[i]);
	}
	printf("\n");
	for (i = 0; i < 3; i++) {
		gm = &board->gmac_block[i];
		printf("RNA_%d: type=%d, enabled=%s, mode=%d, station_id=%d,"
		    "station_txbase=%d, station_rfr=%d ", i, gm->type,
		    gm->enabled ? "Yes" : "No", gm->mode, gm->station_id,
		    gm->station_txbase, gm->station_rfr);
		printf("n_ports=%d, baseaddr=%p, baseirq=%d, baseinst=%d\n",
		    gm->num_ports, (xlr_reg_t *)gm->baseaddr, gm->baseirq,
		    gm->baseinst);
	}
	for (k = 0; k < 3; k++) {	/* for each NA */
		dump_fmn_cpu_credits_for_gmac(board, k);
		dump_fmn_gmac_credits(board, k);
	}
}

static void
dump_mac_stats(struct nlge_softc *sc)
{
	xlr_reg_t *addr;
	uint32_t pkts_tx, pkts_rx;

	addr = sc->base;
	pkts_rx = NLGE_READ(sc->base, R_RPKT);
	pkts_tx = NLGE_READ(sc->base, R_TPKT);

	printf("[nlge_%d mac stats]: pkts_tx=%u, pkts_rx=%u\n", sc->id, pkts_tx,
	    pkts_rx);
	if (pkts_rx > 0) {
		uint32_t r;

		/*
		 * dump all rx counters. we need this because pkts_rx
		 * includes bad packets.
		 */
		for (r = R_RFCS; r <= R_ROVR; r++)
			printf("[nlge_%d mac stats]: [0x%x]=%u\n", sc->id, r,
			    NLGE_READ(sc->base, r));
	}
	if (pkts_tx > 0) {
		uint32_t r;

		/* dump all tx counters. might be useful for debugging. */
		for (r = R_TMCA; r <= R_TFRG; r++) {
			if ((r == (R_TNCL + 1)) || (r == (R_TNCL + 2)))
				continue;
			printf("[nlge_%d mac stats]: [0x%x]=%u\n", sc->id, r,
			    NLGE_READ(sc->base, r));
		}
	}
}

static void
dump_mii_regs(struct nlge_softc *sc)
{
	uint32_t mii_regs[] = {  0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7,
				 0x8,  0x9,  0xa,  0xf, 0x10, 0x11, 0x12, 0x13,
				0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
				0x1c, 0x1d, 0x1e };
	int i, n_regs;

	if (sc->mii_base == NULL || sc->mii_bus == NULL)
		return;

	n_regs = sizeof(mii_regs) / sizeof(mii_regs[0]);
	for (i = 0; i < n_regs; i++) {
		printf("[mii_0x%x] = %x\n", mii_regs[i],
		    nlge_mii_read_internal(sc->mii_base, sc->phy_addr,
		        mii_regs[i]));
	}
}

static void
dump_ifmedia(struct ifmedia *ifm)
{
	printf("ifm_mask=%08x, ifm_media=%08x, cur=%p\n", ifm->ifm_mask,
	    ifm->ifm_media, ifm->ifm_cur);
	if (ifm->ifm_cur != NULL) {
		printf("Cur attribs: ifmedia_entry.ifm_media=%08x,"
		    " ifmedia_entry.ifm_data=%08x\n", ifm->ifm_cur->ifm_media,
		    ifm->ifm_cur->ifm_data);
	}
}

static void
dump_mii_data(struct mii_data *mii)
{
	dump_ifmedia(&mii->mii_media);
	printf("ifp=%p, mii_instance=%d, mii_media_status=%08x,"
	    " mii_media_active=%08x\n", mii->mii_ifp, mii->mii_instance,
	    mii->mii_media_status, mii->mii_media_active);
}

static void
dump_pcs_regs(struct nlge_softc *sc, int phy)
{
	int i, val;

	printf("PCS regs from %p for phy=%d\n", sc->pcs_addr, phy);
	for (i = 0; i < 18; i++) {
		if (i == 2 || i == 3 || (i >= 9 && i <= 14))
			continue;
		val = nlge_mii_read_internal(sc->pcs_addr, phy, i);
		printf("PHY:%d pcs[0x%x] is 0x%x\n", phy, i, val);
	}
}
#endif
