if_nlge.c: revision 212553 (deleted lines) vs. revision 212758 (added lines)
1/*-
2 * Copyright (c) 2003-2009 RMI Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * RMI_BSD */
30
31/*
32 * The XLR device supports up to four 10/100/1000 Ethernet MACs and up to
33 * two 10G Ethernet MACs (XGMII). Alternatively, each 10G port can be used
34 * as a SPI-4 interface, with 8 ports per such interface. The MACs are
35 * encapsulated in another hardware block referred to as a network accelerator;
36 * there are three instances of these in an XLR. One of them controls the
37 * four 1G RGMII ports while each of the others controls an XGMII port.
38 * Enabling MACs requires configuring the corresponding network accelerator
39 * and the individual port.
40 * The XLS device supports up to eight 10/100/1000 Ethernet MACs or at most
41 * two 10G Ethernet MACs. The 1G MACs use an SGMII interface and the 10G MACs
42 * use XAUI. These ports are part of two network accelerators.
43 * The nlge driver configures and initializes the non-SPI4 Ethernet ports in
44 * the XLR/XLS devices and enables data transfer on them.
45 */
46
47#include <sys/cdefs.h>
48__FBSDID("$FreeBSD: head/sys/mips/rmi/dev/nlge/if_nlge.c 212553 2010-09-13 13:11:50Z jchandra $");
48__FBSDID("$FreeBSD: head/sys/mips/rmi/dev/nlge/if_nlge.c 212758 2010-09-16 19:13:55Z jchandra $");
49
50#ifdef HAVE_KERNEL_OPTION_HEADERS
51#include "opt_device_polling.h"
52#endif
53
54#include <sys/endian.h>
55#include <sys/systm.h>
56#include <sys/sockio.h>
57#include <sys/param.h>
58#include <sys/lock.h>
59#include <sys/mutex.h>
60#include <sys/proc.h>
61#include <sys/limits.h>
62#include <sys/bus.h>
63#include <sys/mbuf.h>
64#include <sys/malloc.h>
65#include <sys/kernel.h>
66#include <sys/module.h>
67#include <sys/socket.h>
68#define __RMAN_RESOURCE_VISIBLE
69#include <sys/rman.h>
70#include <sys/taskqueue.h>
71#include <sys/smp.h>
72#include <sys/sysctl.h>
73
74#include <net/if.h>
75#include <net/if_arp.h>
76#include <net/ethernet.h>
77#include <net/if_dl.h>
78#include <net/if_media.h>
79#include <net/bpf.h>
80#include <net/if_types.h>
81#include <net/if_vlan_var.h>
82
83#include <netinet/in_systm.h>
84#include <netinet/in.h>
85#include <netinet/ip.h>
86
87#include <vm/vm.h>
88#include <vm/pmap.h>
89#include <vm/uma.h>
90
91#include <machine/reg.h>
92#include <machine/cpu.h>
93#include <machine/mips_opcode.h>
94#include <machine/asm.h>
95#include <machine/cpuregs.h>
96#include <machine/param.h>
97#include <machine/intr_machdep.h>
98#include <machine/clock.h> /* for DELAY */
99#include <machine/bus.h>
100#include <machine/resource.h>
101
102#include <mips/rmi/interrupt.h>
103#include <mips/rmi/msgring.h>
104#include <mips/rmi/iomap.h>
105#include <mips/rmi/pic.h>
106#include <mips/rmi/board.h>
107#include <mips/rmi/rmi_mips_exts.h>
108#include <mips/rmi/rmi_boot_info.h>
109#include <mips/rmi/dev/xlr/atx_cpld.h>
110#include <mips/rmi/dev/xlr/xgmac_mdio.h>
111
112#include <dev/mii/mii.h>
113#include <dev/mii/miivar.h>
114#include "miidevs.h"
115#include <dev/mii/brgphyreg.h>
116#include "miibus_if.h"
117
118#include <mips/rmi/dev/nlge/if_nlge.h>
119
120MODULE_DEPEND(nlna, nlge, 1, 1, 1);
121MODULE_DEPEND(nlge, ether, 1, 1, 1);
122MODULE_DEPEND(nlge, miibus, 1, 1, 1);
123
124/* Network accelerator entry points */
125static int nlna_probe(device_t);
126static int nlna_attach(device_t);
127static int nlna_detach(device_t);
128static int nlna_suspend(device_t);
129static int nlna_resume(device_t);
130static int nlna_shutdown(device_t);
131
132/* GMAC port entry points */
133static int nlge_probe(device_t);
134static int nlge_attach(device_t);
135static int nlge_detach(device_t);
136static int nlge_suspend(device_t);
137static int nlge_resume(device_t);
138static void nlge_init(void *);
139static int nlge_ioctl(struct ifnet *, u_long, caddr_t);
140static void nlge_start(struct ifnet *);
141static void nlge_rx(struct nlge_softc *sc, vm_paddr_t paddr, int len);
142
143static int nlge_mii_write(struct device *, int, int, int);
144static int nlge_mii_read(struct device *, int, int);
145static void nlge_mac_mii_statchg(device_t);
146static int nlge_mediachange(struct ifnet *ifp);
147static void nlge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
148
149/* Other internal/helper functions */
150static void *get_buf(void);
151
152static void nlna_add_to_port_set(struct nlge_port_set *pset,
153 struct nlge_softc *sc);
154static void nlna_config_pde(struct nlna_softc *);
155static void nlna_config_parser(struct nlna_softc *);
156static void nlna_config_classifier(struct nlna_softc *);
157static void nlna_config_fifo_spill_area(struct nlna_softc *sc);
158static void nlna_config_common(struct nlna_softc *);
159static void nlna_disable_ports(struct nlna_softc *sc);
160static void nlna_enable_intr(struct nlna_softc *sc);
161static void nlna_disable_intr(struct nlna_softc *sc);
162static void nlna_enable_ports(struct nlna_softc *sc);
163static void nlna_get_all_softc(device_t iodi_dev,
164 struct nlna_softc **sc_vec, uint32_t vec_sz);
165static void nlna_hw_init(struct nlna_softc *sc);
166static int nlna_is_last_active_na(struct nlna_softc *sc);
167static void nlna_media_specific_config(struct nlna_softc *sc);
168static void nlna_reset_ports(struct nlna_softc *sc,
169 struct xlr_gmac_block_t *blk);
170static struct nlna_softc *nlna_sc_init(device_t dev,
171 struct xlr_gmac_block_t *blk);
172static void nlna_setup_intr(struct nlna_softc *sc);
173static void nlna_smp_update_pde(void *dummy __unused);
174static void nlna_submit_rx_free_desc(struct nlna_softc *sc,
175 uint32_t n_desc);
176
177static int nlge_gmac_config_speed(struct nlge_softc *, int quick);
178static void nlge_hw_init(struct nlge_softc *sc);
179static int nlge_if_init(struct nlge_softc *sc);
180static void nlge_intr(void *arg);
181static int nlge_irq_init(struct nlge_softc *sc);
182static void nlge_irq_fini(struct nlge_softc *sc);
183static void nlge_media_specific_init(struct nlge_softc *sc);
184static void nlge_mii_init(device_t dev, struct nlge_softc *sc);
185static int nlge_mii_read_internal(xlr_reg_t *mii_base, int phyaddr,
186 int regidx);
187static void nlge_mii_write_internal(xlr_reg_t *mii_base, int phyaddr,
188 int regidx, int regval);
189void nlge_msgring_handler(int bucket, int size, int code,
190 int stid, struct msgrng_msg *msg, void *data);
191static void nlge_port_disable(int id, xlr_reg_t *base, int port_type);
192static void nlge_port_enable(struct nlge_softc *sc);
193static void nlge_read_mac_addr(struct nlge_softc *sc);
194static void nlge_sc_init(struct nlge_softc *sc, device_t dev,
195 struct xlr_gmac_port *port_info);
196static void nlge_set_mac_addr(struct nlge_softc *sc);
197static void nlge_set_port_attribs(struct nlge_softc *,
198 struct xlr_gmac_port *);
199static void nlge_sgmii_init(struct nlge_softc *sc);
200static void nlge_start_locked(struct ifnet *ifp, struct nlge_softc *sc);
201
202static int prepare_fmn_message(struct nlge_softc *sc,
203 struct msgrng_msg *msg, uint32_t *n_entries, struct mbuf *m_head,
204 uint64_t fr_stid, struct nlge_tx_desc **tx_desc);
205
206static void release_tx_desc(vm_paddr_t phy_addr);
207static int send_fmn_msg_tx(struct nlge_softc *, struct msgrng_msg *,
208 uint32_t n_entries);
209
210//#define DEBUG
211#ifdef DEBUG
212static int mac_debug = 1;
213static int reg_dump = 0;
214#undef PDEBUG
215#define PDEBUG(fmt, args...) \
216 do {\
217 if (mac_debug) {\
218 printf("[%s@%d|%s]: cpu_%d: " fmt, \
219 __FILE__, __LINE__, __FUNCTION__, PCPU_GET(cpuid), ##args);\
220 }\
221 } while(0);
222
223/* Debug/dump functions */
224static void dump_reg(xlr_reg_t *addr, uint32_t offset, char *name);
225static void dump_gmac_registers(struct nlge_softc *);
226static void dump_na_registers(xlr_reg_t *base, int port_id);
227static void dump_mac_stats(struct nlge_softc *sc);
228static void dump_mii_regs(struct nlge_softc *sc) __attribute__((used));
229static void dump_mii_data(struct mii_data *mii) __attribute__((used));
230static void dump_board_info(struct xlr_board_info *);
231static void dump_pcs_regs(struct nlge_softc *sc, int phy);
232
233#else
234#undef PDEBUG
235#define PDEBUG(fmt, args...)
236#define dump_reg(a, o, n) /* nop */
237#define dump_gmac_registers(a) /* nop */
238#define dump_na_registers(a, p) /* nop */
239#define dump_board_info(b) /* nop */
240#define dump_mac_stats(sc) /* nop */
241#define dump_mii_regs(sc) /* nop */
242#define dump_mii_data(mii) /* nop */
243#define dump_pcs_regs(sc, phy) /* nop */
244#endif
245
246/* Wrappers etc. to export the driver entry points. */
247static device_method_t nlna_methods[] = {
248 /* Device interface */
249 DEVMETHOD(device_probe, nlna_probe),
250 DEVMETHOD(device_attach, nlna_attach),
251 DEVMETHOD(device_detach, nlna_detach),
252 DEVMETHOD(device_shutdown, nlna_shutdown),
253 DEVMETHOD(device_suspend, nlna_suspend),
254 DEVMETHOD(device_resume, nlna_resume),
255
256 /* bus interface : TBD : what are these for ? */
257 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
258 DEVMETHOD(bus_print_child, bus_generic_print_child),
259 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
260
261 { 0, 0 }
262};
263
264static driver_t nlna_driver = {
265 "nlna",
266 nlna_methods,
267 sizeof(struct nlna_softc)
268};
269
270static devclass_t nlna_devclass;
271
272static device_method_t nlge_methods[] = {
273 /* Device interface */
274 DEVMETHOD(device_probe, nlge_probe),
275 DEVMETHOD(device_attach, nlge_attach),
276 DEVMETHOD(device_detach, nlge_detach),
277 DEVMETHOD(device_shutdown, bus_generic_shutdown),
278 DEVMETHOD(device_suspend, nlge_suspend),
279 DEVMETHOD(device_resume, nlge_resume),
280
281 /* MII interface */
282 DEVMETHOD(miibus_readreg, nlge_mii_read),
283 DEVMETHOD(miibus_writereg, nlge_mii_write),
284 DEVMETHOD(miibus_statchg, nlge_mac_mii_statchg),
285
286 {0, 0}
287};
288
289static driver_t nlge_driver = {
290 "nlge",
291 nlge_methods,
292 sizeof(struct nlge_softc)
293};
294
295static devclass_t nlge_devclass;
296
297DRIVER_MODULE(nlna, iodi, nlna_driver, nlna_devclass, 0, 0);
298DRIVER_MODULE(nlge, nlna, nlge_driver, nlge_devclass, 0, 0);
299DRIVER_MODULE(miibus, nlge, miibus_driver, miibus_devclass, 0, 0);
300
301static uma_zone_t nl_tx_desc_zone;
302
303/* Function to atomically increment an integer with the given value. */
304static __inline__ unsigned int
305ldadd_wu(unsigned int value, unsigned long *addr)
306{
307 __asm__ __volatile__( ".set push\n"
308 ".set noreorder\n"
309 "move $8, %2\n"
310 "move $9, %3\n"
311 /* "ldaddwu $8, $9\n" */
312 ".word 0x71280011\n"
313 "move %0, $8\n"
314 ".set pop\n"
315 : "=&r"(value), "+m"(*addr)
316 : "0"(value), "r" ((unsigned long)addr)
317 : "$8", "$9");
318 return value;
319}
320
321static __inline__ uint32_t
322xlr_enable_kx(void)
323{
324 uint32_t sr = mips_rd_status();
325
326 mips_wr_status((sr & ~MIPS_SR_INT_IE) | MIPS_SR_KX);
327 return sr;
328}
303static __inline void
304atomic_incr_long(unsigned long *addr)
305{
306 /* XXX: fix for 64 bit */
307 unsigned int *iaddr = (unsigned int *)addr;
308
309 xlr_ldaddwu(1, iaddr);
310}
329
330static int
331nlna_probe(device_t dev)
332{
333 return (BUS_PROBE_DEFAULT);
334}
335
336/*
337 * Add all attached GMAC/XGMAC ports to the device tree. Port
338 * configuration is spread in two regions - common configuration
339 * for all ports in the NA and per-port configuration in MAC-specific
340 * region. This function does the following:
341 * - adds the ports to the device tree
342 * - resets the ports
343 * - does all the common initialization
344 * - invokes bus_generic_attach for per-port configuration
345 * - supplies initial free rx descriptors to ports
346 * - initializes s/w data structures
347 * - finally, enables interrupts (only in the last NA).
348 *
349 * For reference, sample address space for common and per-port
350 * registers is given below.
351 *
352 * The address map for RNA0 is: (typical value)
353 *
354 * XLR_IO_BASE +--------------------------------------+ 0xbef0_0000
355 * | |
356 * | |
357 * | |
358 * | |
359 * | |
360 * | |
361 * GMAC0 ---> +--------------------------------------+ 0xbef0_c000
362 * | |
363 * | |
364 * (common) -> |......................................| 0xbef0_c400
365 * | |
366 * | (RGMII/SGMII: common registers) |
367 * | |
368 * GMAC1 ---> |--------------------------------------| 0xbef0_d000
369 * | |
370 * | |
371 * (common) -> |......................................| 0xbef0_d400
372 * | |
373 * | (RGMII/SGMII: common registers) |
374 * | |
375 * |......................................|
376 * and so on ....
377 *
378 * Ref: Figure 14-3 and Table 14-1 of XLR PRM
379 */
380static int
381nlna_attach(device_t dev)
382{
383 struct xlr_gmac_block_t *block_info;
384 device_t gmac_dev;
385 struct nlna_softc *sc;
386 int error;
387 int i;
388 int id;
389
390 id = device_get_unit(dev);
391 block_info = device_get_ivars(dev);
392 if (!block_info->enabled) {
393 return 0;
394 }
395
396#ifdef DEBUG
397 dump_board_info(&xlr_board_info);
398#endif
399 block_info->baseaddr += DEFAULT_XLR_IO_BASE;
400
401 /* Initialize nlna state in softc structure */
402 sc = nlna_sc_init(dev, block_info);
403
404 /* Add devices for the ports controlled by this NA. */
405 if (block_info->type == XLR_GMAC) {
406 KASSERT(id < 2, ("No GMACs supported with this network"
407 "accelerator: %d", id));
408 for (i = 0; i < sc->num_ports; i++) {
409 gmac_dev = device_add_child(dev, "nlge", -1);
410 device_set_ivars(gmac_dev, &block_info->gmac_port[i]);
411 }
412 } else if (block_info->type == XLR_XGMAC) {
413 KASSERT(id > 0 && id <= 2, ("No XGMACs supported with this"
414 "network accelerator: %d", id));
415 gmac_dev = device_add_child(dev, "nlge", -1);
416 device_set_ivars(gmac_dev, &block_info->gmac_port[0]);
417 } else if (block_info->type == XLR_SPI4) {
418 /* SPI4 is not supported here */
419 device_printf(dev, "Unsupported: NA with SPI4 type");
420 return (ENOTSUP);
421 }
422
423 nlna_reset_ports(sc, block_info);
424
425 /* Initialize Network Accelerator registers. */
426 nlna_hw_init(sc);
427
428 error = bus_generic_attach(dev);
429 if (error) {
430 device_printf(dev, "failed to attach port(s)\n");
431 goto fail;
432 }
433
434 /* Send out the initial pool of free-descriptors for the rx path */
435 nlna_submit_rx_free_desc(sc, MAX_FRIN_SPILL);
436
437 /* S/w data structure initializations shared by all NA's. */
438 if (nl_tx_desc_zone == NULL) {
439 /* Create a zone for allocating tx descriptors */
440 nl_tx_desc_zone = uma_zcreate("NL Tx Desc",
441 sizeof(struct nlge_tx_desc), NULL, NULL, NULL, NULL,
442 XLR_CACHELINE_SIZE, 0);
443 }
444
445 /* Enable NA interrupts */
446 nlna_setup_intr(sc);
447
448 return (0);
449
450fail:
451 return (error);
452}
453
454static int
455nlna_detach(device_t dev)
456{
457 struct nlna_softc *sc;
458
459 sc = device_get_softc(dev);
460 if (device_is_alive(dev)) {
461 nlna_disable_intr(sc);
462 /* This will make sure that per-port detach is complete
463 * and all traffic on the ports has been stopped. */
464 bus_generic_detach(dev);
465 uma_zdestroy(nl_tx_desc_zone);
466 }
467
468 return (0);
469}
470
471static int
472nlna_suspend(device_t dev)
473{
474
475 return (0);
476}
477
478static int
479nlna_resume(device_t dev)
480{
481
482 return (0);
483}
484
485static int
486nlna_shutdown(device_t dev)
487{
488 return (0);
489}
490
491
492/* GMAC port entry points */
493static int
494nlge_probe(device_t dev)
495{
496 struct nlge_softc *sc;
497 struct xlr_gmac_port *port_info;
498 int index;
499 char *desc[] = { "RGMII", "SGMII", "RGMII/SGMII", "XGMAC", "XAUI",
500 "Unknown"};
501
502 port_info = device_get_ivars(dev);
503 index = (port_info->type < XLR_RGMII || port_info->type > XLR_XAUI) ?
504 5 : port_info->type;
505 device_set_desc_copy(dev, desc[index]);
506
507 sc = device_get_softc(dev);
508 nlge_sc_init(sc, dev, port_info);
509
510 nlge_port_disable(sc->id, sc->base, sc->port_type);
511
512 return (0);
513}
514
515static int
516nlge_attach(device_t dev)
517{
518 struct nlge_softc *sc;
519 struct nlna_softc *nsc;
520 int error;
521
522 sc = device_get_softc(dev);
523
524 nlge_if_init(sc);
525 nlge_mii_init(dev, sc);
526 error = nlge_irq_init(sc);
527 if (error)
528 return error;
529 nlge_hw_init(sc);
530
531 nsc = (struct nlna_softc *)device_get_softc(device_get_parent(dev));
532 nsc->child_sc[sc->instance] = sc;
533
534 return (0);
535}
536
537static int
538nlge_detach(device_t dev)
539{
540 struct nlge_softc *sc;
541 struct ifnet *ifp;
542
543 sc = device_get_softc(dev);
544 ifp = sc->nlge_if;
545
546 if (device_is_attached(dev)) {
547 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
548 nlge_port_disable(sc->id, sc->base, sc->port_type);
549 nlge_irq_fini(sc);
550 ether_ifdetach(ifp);
551 bus_generic_detach(dev);
552 }
553 if (ifp)
554 if_free(ifp);
555
556 return (0);
557}
558
559static int
560nlge_suspend(device_t dev)
561{
562 return (0);
563}
564
565static int
566nlge_resume(device_t dev)
567{
568 return (0);
569}
570
571static void
572nlge_init(void *addr)
573{
574 struct nlge_softc *sc;
575 struct ifnet *ifp;
576
577 sc = (struct nlge_softc *)addr;
578 ifp = sc->nlge_if;
579
580 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
581 return;
582
583 nlge_gmac_config_speed(sc, 0);
584 ifp->if_drv_flags |= IFF_DRV_RUNNING;
585 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
586 nlge_port_enable(sc);
587
588 if (sc->port_type == XLR_SGMII) {
589 dump_pcs_regs(sc, 27);
590 }
591 dump_gmac_registers(sc);
592 dump_mac_stats(sc);
593}
594
595static int
596nlge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
597{
598 struct mii_data *mii;
599 struct nlge_softc *sc;
600 struct ifreq *ifr;
601 int error;
602
603 sc = ifp->if_softc;
604 error = 0;
605 ifr = (struct ifreq *)data;
606 switch(command) {
607 case SIOCSIFFLAGS:
608 break;
609 case SIOCSIFMEDIA:
610 case SIOCGIFMEDIA:
611 if (sc->mii_bus != NULL) {
612 mii = (struct mii_data *)device_get_softc(sc->mii_bus);
613 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
614 command);
615 }
616 break;
617 case SIOCSIFADDR:
618 // intentional fall thru
619 case SIOCSIFMTU:
620 default:
621 error = ether_ioctl(ifp, command, data);
622 break;
623 }
624
625 return (error);
626}
627
628/* This function is called from an interrupt handler */
629void
630nlge_msgring_handler(int bucket, int size, int code, int stid,
631 struct msgrng_msg *msg, void *data)
632{
633 struct nlna_softc *na_sc;
634 struct nlge_softc *sc;
635 struct ifnet *ifp;
636 vm_paddr_t phys_addr;
637 unsigned long addr;
638 uint32_t length;
639 int ctrl;
640 int cpu;
641 int tx_error;
642 int port;
643 int vcpu;
644 int is_p2p;
645
646 cpu = xlr_core_id();
647 vcpu = (cpu << 2) + xlr_thr_id();
648
649 addr = 0;
650 is_p2p = 0;
651 tx_error = 0;
652 length = (msg->msg0 >> 40) & 0x3fff;
653 na_sc = (struct nlna_softc *)data;
654 if (length == 0) {
655 ctrl = CTRL_REG_FREE;
656 phys_addr = msg->msg0 & 0xffffffffffULL;
657 port = (msg->msg0 >> 54) & 0x0f;
658 is_p2p = (msg->msg0 >> 62) & 0x1;
659 tx_error = (msg->msg0 >> 58) & 0xf;
660 } else {
661 ctrl = CTRL_SNGL;
662 phys_addr = msg->msg0 & 0xffffffffe0ULL;
663 length = length - BYTE_OFFSET - MAC_CRC_LEN;
664 port = msg->msg0 & 0x0f;
665 }
666
667 sc = na_sc->child_sc[port];
668 if (sc == NULL) {
669 printf("Message (of %d len) with softc=NULL on %d port (type=%s)\n",
670 length, port, (ctrl == CTRL_SNGL ? "Pkt rx" :
671 "Freeback for tx packet"));
672 return;
673 }
674
675 if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
676 if (is_p2p) {
677 release_tx_desc(phys_addr);
678 } else {
679 m_freem((struct mbuf *)(uintptr_t)phys_addr);
680 }
681
682 ifp = sc->nlge_if;
683 if (ifp->if_drv_flags & IFF_DRV_OACTIVE){
684 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
685 }
686 ldadd_wu(1, (tx_error) ? &ifp->if_oerrors: &ifp->if_opackets);
668 atomic_incr_long((tx_error) ? &ifp->if_oerrors: &ifp->if_opackets);
687 } else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
688 /* Rx Packet */
689
690 nlge_rx(sc, phys_addr, length);
691 nlna_submit_rx_free_desc(na_sc, 1); /* return free descr to NA */
692 } else {
693 printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
694 }
695
696}
697
698static void
699nlge_start(struct ifnet *ifp)
700{
701 struct nlge_softc *sc;
702
703 sc = ifp->if_softc;
704 //NLGE_LOCK(sc);
705 nlge_start_locked(ifp, sc);
706 //NLGE_UNLOCK(sc);
707}
708
709static void
710nlge_start_locked(struct ifnet *ifp, struct nlge_softc *sc)
711{
712 struct msgrng_msg msg;
713 struct mbuf *m;
714 struct nlge_tx_desc *tx_desc;
715 uint64_t fr_stid;
716 uint32_t cpu;
717 uint32_t n_entries;
718 uint32_t tid;
719 int ret;
720 int sent;
721
722 cpu = xlr_core_id();
723 tid = xlr_thr_id();
724 fr_stid = cpu * 8 + tid + 4;
725
726 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
727 return;
728 }
729
730 do {
731 /* Grab a packet off the queue. */
732 IF_DEQUEUE(&ifp->if_snd, m);
733 if (m == NULL) {
734 return;
735 }
736
737 tx_desc = NULL;
738 ret = prepare_fmn_message(sc, &msg, &n_entries, m, fr_stid, &tx_desc);
739 if (ret) {
740 goto fail;
741 }
742 sent = send_fmn_msg_tx(sc, &msg, n_entries);
743 if (sent != 0) {
744 goto fail;
745 }
746 } while(1);
747
748 return;
749
750fail:
751 if (tx_desc != NULL) {
752 uma_zfree(nl_tx_desc_zone, tx_desc);
753 }
754 if (m != NULL) {
755 /*
756 * TBD: It is observed that only when both of the statements
757 * below are not enabled, traffic continues till the end.
758 * Otherwise, the port locks up in the middle and never
759 * recovers from it. The current theory for this behavior
760 * is that the queue is full and the upper layer is neither
761 * able to add to it nor invoke nlge_start to drain the
762 * queue. The driver may have to do something in addition
763 * to resetting the OACTIVE bit when a transmit free-back
764 * is received.
765 */
766 //ifp->if_drv_flags |= IFF_DRV_OACTIVE;
767 //IF_PREPEND(&ifp->if_snd, m);
768 m_freem(m);
769 ldadd_wu(1, &ifp->if_iqdrops);
751 atomic_incr_long(&ifp->if_iqdrops);
770 }
771 return;
772}
773
774static void
775nlge_rx(struct nlge_softc *sc, vm_paddr_t paddr, int len)
776{
777 struct ifnet *ifp;
778 struct mbuf *m;
779 uint32_t tm, mag, sr;
761 uint64_t tm, mag;
762 uint32_t sr;
780
781 sr = xlr_enable_kx();
782 tm = xlr_paddr_lw(paddr - XLR_CACHELINE_SIZE);
783 mag = xlr_paddr_lw(paddr - XLR_CACHELINE_SIZE + sizeof(uint32_t));
784 mips_wr_status(sr);
765 tm = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
766 mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
767 xlr_restore_kx(sr);
785
786 m = (struct mbuf *)(intptr_t)tm;
787 if (mag != 0xf00bad) {
788 /* somebody else's packet. Error - FIXME in initialization */
789 printf("cpu %d: *ERROR* Not my packet paddr %llx\n",
790 xlr_core_id(), (uint64_t) paddr);
791 return;
792 }
793
794 ifp = sc->nlge_if;
795 /* align the data */
796 m->m_data += BYTE_OFFSET;
797 m->m_pkthdr.len = m->m_len = len;
798 m->m_pkthdr.rcvif = ifp;
799
800 ldadd_wu(1, &ifp->if_ipackets);
783 atomic_incr_long(&ifp->if_ipackets);
801 (*ifp->if_input)(ifp, m);
802}
803
804static int
805nlge_mii_write(struct device *dev, int phyaddr, int regidx, int regval)
806{
807 struct nlge_softc *sc;
808
809 sc = device_get_softc(dev);
810 if (sc->phy_addr == phyaddr && sc->port_type != XLR_XGMII)
811 nlge_mii_write_internal(sc->mii_base, phyaddr, regidx, regval);
812
813 return (0);
814}
815
816static int
817nlge_mii_read(struct device *dev, int phyaddr, int regidx)
818{
819 struct nlge_softc *sc;
820 int val;
821
822 sc = device_get_softc(dev);
823 val = (sc->phy_addr != phyaddr && sc->port_type != XLR_XGMII) ? (0xffff) :
824 nlge_mii_read_internal(sc->mii_base, phyaddr, regidx);
825
826 return (val);
827}
828
829static void
830nlge_mac_mii_statchg(device_t dev)
831{
832}
833
834static int
835nlge_mediachange(struct ifnet *ifp)
836{
837 return 0;
838}
839
840static void
841nlge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
842{
843 struct nlge_softc *sc;
844 struct mii_data *md;
845
846 md = NULL;
847 sc = ifp->if_softc;
848 if (sc->mii_bus)
849 md = device_get_softc(sc->mii_bus);
850
851 ifmr->ifm_status = IFM_AVALID;
852 ifmr->ifm_active = IFM_ETHER;
853
854 if (sc->link == xlr_mac_link_down)
855 return;
856
857 if (md != NULL)
858 ifmr->ifm_active = md->mii_media.ifm_cur->ifm_media;
859 ifmr->ifm_status |= IFM_ACTIVE;
860}
861
862static struct nlna_softc *
863nlna_sc_init(device_t dev, struct xlr_gmac_block_t *blk)
864{
865 struct nlna_softc *sc;
866
867 sc = device_get_softc(dev);
868 memset(sc, 0, sizeof(*sc));
869 sc->nlna_dev = dev;
870 sc->base = (xlr_reg_t *) blk->baseaddr;
871 sc->rfrbucket = blk->station_rfr;
872 sc->station_id = blk->station_id;
873 sc->na_type = blk->type;
874 sc->mac_type = blk->mode;
875 sc->num_ports = blk->num_ports;
876
877 sc->mdio_set.port_vec = sc->mdio_sc;
878 sc->mdio_set.vec_sz = XLR_MAX_MACS;
879
880 return (sc);
881}
882
883/*
884 * Do:
885 * - Initialize common GMAC registers (index range 0x100-0x3ff).
886 */
887static void
888nlna_hw_init(struct nlna_softc *sc)
889{
890
891 /*
892 * It is seen that this is a critical function in bringing up FreeBSD.
893 * When it is not invoked, FreeBSD panics and fails during the
894 * multi-processor init (SI_SUB_SMP of mi_startup). The key function
895 * in this sequence seems to be platform_prep_smp_launch. */
896 if (register_msgring_handler(sc->station_id, nlge_msgring_handler, sc)) {
897 panic("Couldn't register msgring handler\n");
898 }
899 nlna_config_fifo_spill_area(sc);
900 nlna_config_pde(sc);
901 nlna_config_common(sc);
902 nlna_config_parser(sc);
903 nlna_config_classifier(sc);
904}
905
906/*
907 * Enable interrupts on all the ports controlled by this NA. For now, we
908 * only care about the MII interrupt and this has to be enabled only
909 * on the port id0.
910 *
911 * This function is not in-sync with the regular way of doing things - it
912 * executes only in the context of the last active network accelerator (and
913 * thereby has some ugly accesses in the device tree). Though inelegant, it
914 * is necessary to do it this way as the per-port interrupts can be
915 * setup/enabled only after all the network accelerators have been
916 * initialized.
917 */
918static void
919nlna_setup_intr(struct nlna_softc *sc)
920{
921 struct nlna_softc *na_sc[XLR_MAX_NLNA];
922 struct nlge_port_set *pset;
923 struct xlr_gmac_port *port_info;
924 device_t iodi_dev;
925 int i, j;
926
927 if (!nlna_is_last_active_na(sc))
928 return ;
929
930 /* Collect all nlna softc pointers */
931 memset(na_sc, 0, sizeof(*na_sc) * XLR_MAX_NLNA);
932 iodi_dev = device_get_parent(sc->nlna_dev);
933 nlna_get_all_softc(iodi_dev, na_sc, XLR_MAX_NLNA);
934
935 /* Setup the MDIO interrupt lists. */
936 /*
937 * MDIO interrupts are coarse - a single interrupt line provides
938 * information about one of many possible ports. To figure out the
939 * exact port on which action is to be taken, all of the ports
940 * linked to an MDIO interrupt should be read. To enable this,
941 * ports need to add themselves to port sets.
942 */
943 for (i = 0; i < XLR_MAX_NLNA; i++) {
944 if (na_sc[i] == NULL)
945 continue;
946 for (j = 0; j < na_sc[i]->num_ports; j++) {
947 /* processing j-th port on i-th NA */
948 port_info = device_get_ivars(
949 na_sc[i]->child_sc[j]->nlge_dev);
950 pset = &na_sc[port_info->mdint_id]->mdio_set;
951 nlna_add_to_port_set(pset, na_sc[i]->child_sc[j]);
952 }
953 }
954
955 /* Enable interrupts */
956 for (i = 0; i < XLR_MAX_NLNA; i++) {
957 if (na_sc[i] != NULL && na_sc[i]->na_type != XLR_XGMAC) {
958 nlna_enable_intr(na_sc[i]);
959 }
960 }
961}
962
963static void
964nlna_add_to_port_set(struct nlge_port_set *pset, struct nlge_softc *sc)
965{
966 int i;
967
968 /* step past the non-NULL elements */
969 for (i = 0; i < pset->vec_sz && pset->port_vec[i] != NULL; i++) ;
970 if (i < pset->vec_sz)
971 pset->port_vec[i] = sc;
972 else
973 printf("warning: internal error: out-of-bounds for MDIO array");
974}
975
976static void
977nlna_enable_intr(struct nlna_softc *sc)
978{
979 int i;
980
981 for (i = 0; i < sc->num_ports; i++) {
982 if (sc->child_sc[i]->instance == 0)
983 NLGE_WRITE(sc->child_sc[i]->base, R_INTMASK,
984 (1 << O_INTMASK__MDInt));
985 }
986}
987
988static void
989nlna_disable_intr(struct nlna_softc *sc)
990{
991 int i;
992
993 for (i = 0; i < sc->num_ports; i++) {
994 if (sc->child_sc[i]->instance == 0)
995 NLGE_WRITE(sc->child_sc[i]->base, R_INTMASK, 0);
996 }
997}
998
999static int
1000nlna_is_last_active_na(struct nlna_softc *sc)
1001{
1002 int id;
1003
1004 id = device_get_unit(sc->nlna_dev);
1005 return (id == 2 || xlr_board_info.gmac_block[id + 1].enabled == 0);
1006}
1007
1008static void
1009nlna_submit_rx_free_desc(struct nlna_softc *sc, uint32_t n_desc)
1010{
1011 struct msgrng_msg msg;
1012 void *ptr;
1013 uint32_t msgrng_flags;
1014 int i, n, stid, ret, code;
1015
1016 if (n_desc > 1) {
1017 PDEBUG("Sending %d free-in descriptors to station=%d\n", n_desc,
1018 sc->rfrbucket);
1019 }
1020
1021 stid = sc->rfrbucket;
1022 code = (sc->na_type == XLR_XGMAC) ? MSGRNG_CODE_XGMAC : MSGRNG_CODE_MAC;
1023 memset(&msg, 0, sizeof(msg));
1024
1025 for (i = 0; i < n_desc; i++) {
1026 ptr = get_buf();
1027 if (!ptr) {
1028 ret = -ENOMEM;
1029 device_printf(sc->nlna_dev, "Cannot allocate mbuf\n");
1030 break;
1031 }
1032
1033 /* Send the free Rx desc to the MAC */
1034 msg.msg0 = vtophys(ptr) & 0xffffffffe0ULL;
1035 n = 0;
1036 do {
1037 msgrng_flags = msgrng_access_enable();
1038 ret = message_send(1, code, stid, &msg);
1039 msgrng_restore(msgrng_flags);
1040 KASSERT(n++ < 100000, ("Too many credit fails\n"));
1041 } while (ret != 0);
1042 }
1043}
1044
1045static __inline__ void *
1046nlna_config_spill(xlr_reg_t *base, int reg_start_0, int reg_start_1,
1047 int reg_size, int size)
1048{
1049 void *spill;
1050 uint64_t phys_addr;
1051 uint32_t spill_size;
1052
1053 spill_size = size;
1054 spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
1055 M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
1056 if (spill == NULL || ((vm_offset_t) spill & (XLR_CACHELINE_SIZE - 1))) {
1057 panic("Unable to allocate memory for spill area!\n");
1058 }
1059 phys_addr = vtophys(spill);
1060 PDEBUG("Allocated spill %d bytes at %llx\n", size, phys_addr);
1061 NLGE_WRITE(base, reg_start_0, (phys_addr >> 5) & 0xffffffff);
1062 NLGE_WRITE(base, reg_start_1, (phys_addr >> 37) & 0x07);
1063 NLGE_WRITE(base, reg_size, spill_size);
1064
1065 return (spill);
1066}
1067
1068/*
1069 * Configure the 6 FIFOs that are used by the network accelerator to
1070 * communicate with the rest of the XLx device. 4 of the FIFO's are for
1071 * packets from NA --> cpu (called Class FIFO's) and 2 are for feeding
1072 * the NA with free descriptors.
1073 */
1074static void
1075nlna_config_fifo_spill_area(struct nlna_softc *sc)
1076{
1077 sc->frin_spill = nlna_config_spill(sc->base,
1078 R_REG_FRIN_SPILL_MEM_START_0,
1079 R_REG_FRIN_SPILL_MEM_START_1,
1080 R_REG_FRIN_SPILL_MEM_SIZE,
1081 MAX_FRIN_SPILL *
1082 sizeof(struct fr_desc));
1083 sc->frout_spill = nlna_config_spill(sc->base,
1084 R_FROUT_SPILL_MEM_START_0,
1085 R_FROUT_SPILL_MEM_START_1,
1086 R_FROUT_SPILL_MEM_SIZE,
1087 MAX_FROUT_SPILL *
1088 sizeof(struct fr_desc));
1089 sc->class_0_spill = nlna_config_spill(sc->base,
1090 R_CLASS0_SPILL_MEM_START_0,
1091 R_CLASS0_SPILL_MEM_START_1,
1092 R_CLASS0_SPILL_MEM_SIZE,
1093 MAX_CLASS_0_SPILL *
1094 sizeof(union rx_tx_desc));
1095 sc->class_1_spill = nlna_config_spill(sc->base,
1096 R_CLASS1_SPILL_MEM_START_0,
1097 R_CLASS1_SPILL_MEM_START_1,
1098 R_CLASS1_SPILL_MEM_SIZE,
1099 MAX_CLASS_1_SPILL *
1100 sizeof(union rx_tx_desc));
1101 sc->class_2_spill = nlna_config_spill(sc->base,
1102 R_CLASS2_SPILL_MEM_START_0,
1103 R_CLASS2_SPILL_MEM_START_1,
1104 R_CLASS2_SPILL_MEM_SIZE,
1105 MAX_CLASS_2_SPILL *
1106 sizeof(union rx_tx_desc));
1107 sc->class_3_spill = nlna_config_spill(sc->base,
1108 R_CLASS3_SPILL_MEM_START_0,
1109 R_CLASS3_SPILL_MEM_START_1,
1110 R_CLASS3_SPILL_MEM_SIZE,
1111 MAX_CLASS_3_SPILL *
1112 sizeof(union rx_tx_desc));
1113}
1114
1115/* Set the CPU buckets that receive packets from the NA class FIFOs. */
1116static void
1117nlna_config_pde(struct nlna_softc *sc)
1118{
1119 uint64_t bucket_map;
1120 uint32_t cpumask;
1121 int i, cpu, bucket;
1122
1123 cpumask = 0x1;
1124#ifdef SMP
1125 /*
1126 * rge may be called before SMP start in a BOOTP/NFSROOT
1127 * setup. We will distribute packets to other cpus only when
1128 * the SMP is started.
1129 */
1130 if (smp_started)
1131 cpumask = xlr_hw_thread_mask;
1132#endif
1133
1134 bucket_map = 0;
1135 for (i = 0; i < 32; i++) {
1136 if (cpumask & (1 << i)) {
1137 cpu = i;
1138 bucket = ((cpu >> 2) << 3);
1139 bucket_map |= (1ULL << bucket);
1140 }
1141 }
1142 NLGE_WRITE(sc->base, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
1143 NLGE_WRITE(sc->base, R_PDE_CLASS_0 + 1, ((bucket_map >> 32) & 0xffffffff));
1144
1145 NLGE_WRITE(sc->base, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
1146 NLGE_WRITE(sc->base, R_PDE_CLASS_1 + 1, ((bucket_map >> 32) & 0xffffffff));
1147
1148 NLGE_WRITE(sc->base, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
1149 NLGE_WRITE(sc->base, R_PDE_CLASS_2 + 1, ((bucket_map >> 32) & 0xffffffff));
1150
1151 NLGE_WRITE(sc->base, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
1152 NLGE_WRITE(sc->base, R_PDE_CLASS_3 + 1, ((bucket_map >> 32) & 0xffffffff));
1153}
1154
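/*
 * Once the secondary CPUs are up (SI_SUB_SMP), reprogram the packet
 * distribution engine of every NA so that received packets are spread
 * over all hardware threads instead of only CPU 0.
 */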
1155static void
1156nlna_smp_update_pde(void *dummy __unused)
1157{
1158 device_t iodi_dev;
1159 struct nlna_softc *na_sc[XLR_MAX_NLNA];
1160 int i;
1161
1162 printf("Updating packet distribution for SMP\n");
1163
1164 iodi_dev = devclass_get_device(devclass_find("iodi"), 0);
1165 nlna_get_all_softc(iodi_dev, na_sc, XLR_MAX_NLNA);
1166
1167 for (i = 0; i < XLR_MAX_NLNA; i++) {
1168 if (na_sc[i] == NULL)
1169 continue;
1170 nlna_disable_ports(na_sc[i]);
1171 nlna_config_pde(na_sc[i]);
1172 nlna_enable_ports(na_sc[i]);
1173 }
1174}
1175
1176SYSINIT(nlna_smp_update_pde, SI_SUB_SMP, SI_ORDER_ANY, nlna_smp_update_pde,
1177 NULL);
1178
1179static void
1180nlna_config_parser(struct nlna_softc *sc)
1181{
1182	/*
1183	 * Mark it as no classification. The parser extract is guaranteed to
1184	 * be zero with no classification.
1185	 */
1186 NLGE_WRITE(sc->base, R_L2TYPE_0, 0x00);
1187 NLGE_WRITE(sc->base, R_L2TYPE_0, 0x01);
1188
1189	/* Configure the parser: the L2 Type is configured in the bootloader. */
1190 /* extract IP: src, dest protocol */
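	/*
	 * The values below appear to set up the L3 CAM entry for ethertype
	 * 0x0800 (IPv4): the protocol byte at offset 9 and the 4-byte
	 * source and destination addresses at offsets 12 and 16 of the IP
	 * header (field meanings inferred from the encoding, not from the
	 * PRM).
	 */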
1191 NLGE_WRITE(sc->base, R_L3CTABLE,
1192 (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
1193 (0x0800 << 0));
1194 NLGE_WRITE(sc->base, R_L3CTABLE + 1,
1195 (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));
1196}
1197
1198static void
1199nlna_config_classifier(struct nlna_softc *sc)
1200{
1201 int i;
1202
1203 if (sc->mac_type == XLR_XGMII) { /* TBD: XGMII init sequence */
1204 /* xgmac translation table doesn't have sane values on reset */
1205 for (i = 0; i < 64; i++)
1206 NLGE_WRITE(sc->base, R_TRANSLATETABLE + i, 0x0);
1207
1208 /*
1209 * use upper 7 bits of the parser extract to index the
1210 * translate table
1211 */
1212 NLGE_WRITE(sc->base, R_PARSERCONFIGREG, 0x0);
1213 }
1214}
1215
1216/*
1217 * Complete the h/w register initializations that are common to all the
1218 * ports controlled by an NA.
1219 */
1220static void
1221nlna_config_common(struct nlna_softc *sc)
1222{
1223 struct xlr_gmac_block_t *block_info;
1224 struct stn_cc *gmac_cc_config;
1225 int i, id;
1226
1227 block_info = device_get_ivars(sc->nlna_dev);
1228
1229 id = device_get_unit(sc->nlna_dev);
1230 gmac_cc_config = block_info->credit_config;
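	/*
	 * Program the FMN credit counters for this station; counters[] is
	 * indexed here as [i >> 3][i & 7], i.e. (apparently) eight bucket
	 * entries per destination row.
	 */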
1231 for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1232 NLGE_WRITE(sc->base, R_CC_CPU0_0 + i,
1233 gmac_cc_config->counters[i >> 3][i & 0x07]);
1234 }
1235
1236 NLGE_WRITE(sc->base, R_MSG_TX_THRESHOLD, 3);
1237
1238 NLGE_WRITE(sc->base, R_DMACR0, 0xffffffff);
1239 NLGE_WRITE(sc->base, R_DMACR1, 0xffffffff);
1240 NLGE_WRITE(sc->base, R_DMACR2, 0xffffffff);
1241 NLGE_WRITE(sc->base, R_DMACR3, 0xffffffff);
1242 NLGE_WRITE(sc->base, R_FREEQCARVE, 0);
1243
1244 nlna_media_specific_config(sc);
1245}
1246
1247static void
1248nlna_media_specific_config(struct nlna_softc *sc)
1249{
1250 struct bucket_size *bucket_sizes;
1251
1252 bucket_sizes = xlr_board_info.bucket_sizes;
1253 switch (sc->mac_type) {
1254 case XLR_RGMII:
1255 case XLR_SGMII:
1256 case XLR_XAUI:
1257 NLGE_WRITE(sc->base, R_GMAC_JFR0_BUCKET_SIZE,
1258 bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
1259 NLGE_WRITE(sc->base, R_GMAC_RFR0_BUCKET_SIZE,
1260 bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
1261 NLGE_WRITE(sc->base, R_GMAC_JFR1_BUCKET_SIZE,
1262 bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
1263 NLGE_WRITE(sc->base, R_GMAC_RFR1_BUCKET_SIZE,
1264 bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);
1265
1266 if (sc->mac_type == XLR_XAUI) {
1267 NLGE_WRITE(sc->base, R_TXDATAFIFO0, (224 << 16));
1268 }
1269 break;
1270
1271 case XLR_XGMII:
1272 NLGE_WRITE(sc->base, R_XGS_RFR_BUCKET_SIZE,
1273 bucket_sizes->bucket[sc->rfrbucket]);
1274
1275 default:
1276 break;
1277 }
1278}
1279
1280static void
1281nlna_reset_ports(struct nlna_softc *sc, struct xlr_gmac_block_t *blk)
1282{
1283 xlr_reg_t *addr;
1284 int i;
1285 uint32_t rx_ctrl;
1286
1287	/* Refer to Section 13.9.3 of the PRM for the reset sequence */
1288
1289 for (i = 0; i < sc->num_ports; i++) {
1290 uint32_t base = (uint32_t)DEFAULT_XLR_IO_BASE;
1291
1292 base += blk->gmac_port[i].base_addr;
1293 addr = (xlr_reg_t *) base;
1294
1295 /* 1. Reset RxEnable in MAC_CONFIG */
1296 switch (sc->mac_type) {
1297 case XLR_RGMII:
1298 case XLR_SGMII:
1299 NLGE_UPDATE(addr, R_MAC_CONFIG_1, 0,
1300 (1 << O_MAC_CONFIG_1__rxen));
1301 break;
1302 case XLR_XAUI:
1303 case XLR_XGMII:
1304 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1305 (1 << O_RX_CONTROL__RxEnable));
1306 break;
1307 default:
1308 printf("Error: Unsupported port_type=%d\n",
1309 sc->mac_type);
1310 }
1311
1312 /* 1.1 Wait for RxControl.RxHalt to be set */
1313 do {
1314 rx_ctrl = NLGE_READ(addr, R_RX_CONTROL);
1315 } while (!(rx_ctrl & 0x2));
1316
1317 /* 2. Set the soft reset bit in RxControl */
1318 NLGE_UPDATE(addr, R_RX_CONTROL, (1 << O_RX_CONTROL__SoftReset),
1319 (1 << O_RX_CONTROL__SoftReset));
1320
1321 /* 2.1 Wait for RxControl.SoftResetDone to be set */
1322 do {
1323 rx_ctrl = NLGE_READ(addr, R_RX_CONTROL);
1324 } while (!(rx_ctrl & 0x8));
1325
1326 /* 3. Clear the soft reset bit in RxControl */
1327 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1328 (1 << O_RX_CONTROL__SoftReset));
1329
1330 /* Turn off tx/rx on the port. */
1331 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1332 (1 << O_RX_CONTROL__RxEnable));
1333 NLGE_UPDATE(addr, R_TX_CONTROL, 0,
1334 (1 << O_TX_CONTROL__TxEnable));
1335 }
1336}
1337
1338static void
1339nlna_disable_ports(struct nlna_softc *sc)
1340{
1341 struct xlr_gmac_block_t *blk;
1342 xlr_reg_t *addr;
1343 int i;
1344
1345 blk = device_get_ivars(sc->nlna_dev);
1346 for (i = 0; i < sc->num_ports; i++) {
1347 uint32_t base = (uint32_t)DEFAULT_XLR_IO_BASE;
1348
1349 base += blk->gmac_port[i].base_addr;
1350 addr = (xlr_reg_t *) base;
1351 nlge_port_disable(i, addr, blk->gmac_port[i].type);
1352 }
1353}
1354
1355static void
1356nlna_enable_ports(struct nlna_softc *sc)
1357{
1358 device_t nlge_dev, *devlist;
1359 struct nlge_softc *port_sc;
1360 int i, numdevs;
1361
1362 device_get_children(sc->nlna_dev, &devlist, &numdevs);
1363 for (i = 0; i < numdevs; i++) {
1364 nlge_dev = devlist[i];
1365 if (nlge_dev == NULL)
1366 continue;
1367 port_sc = device_get_softc(nlge_dev);
1368 if (port_sc->nlge_if->if_drv_flags & IFF_DRV_RUNNING)
1369 nlge_port_enable(port_sc);
1370 }
1371 free(devlist, M_TEMP);
1372}
1373
1374static void
1375nlna_get_all_softc(device_t iodi_dev, struct nlna_softc **sc_vec,
1376 uint32_t vec_sz)
1377{
1378 device_t na_dev;
1379 int i;
1380
1381 for (i = 0; i < vec_sz; i++) {
1382 sc_vec[i] = NULL;
1383 na_dev = device_find_child(iodi_dev, "nlna", i);
1384 if (na_dev != NULL)
1385 sc_vec[i] = device_get_softc(na_dev);
1386 }
1387}
1388
1389static void
1390nlge_port_disable(int id, xlr_reg_t *base, int port_type)
1391{
1392 uint32_t rd;
1393
1394 NLGE_UPDATE(base, R_RX_CONTROL, 0x0, 1 << O_RX_CONTROL__RxEnable);
1395 do {
1396 rd = NLGE_READ(base, R_RX_CONTROL);
1397 } while (!(rd & (1 << O_RX_CONTROL__RxHalt)));
1398
1399 NLGE_UPDATE(base, R_TX_CONTROL, 0, 1 << O_TX_CONTROL__TxEnable);
1400 do {
1401 rd = NLGE_READ(base, R_TX_CONTROL);
1402 } while (!(rd & (1 << O_TX_CONTROL__TxIdle)));
1403
1404 switch (port_type) {
1405 case XLR_RGMII:
1406 case XLR_SGMII:
1407 NLGE_UPDATE(base, R_MAC_CONFIG_1, 0,
1408 ((1 << O_MAC_CONFIG_1__rxen) |
1409 (1 << O_MAC_CONFIG_1__txen)));
1410 break;
1411 case XLR_XGMII:
1412 case XLR_XAUI:
1413 NLGE_UPDATE(base, R_XGMAC_CONFIG_1, 0,
1414 ((1 << O_XGMAC_CONFIG_1__hsttfen) |
1415 (1 << O_XGMAC_CONFIG_1__hstrfen)));
1416 break;
1417 default:
1418 panic("Unknown MAC type on port %d\n", id);
1419 }
1420}
1421
1422static void
1423nlge_port_enable(struct nlge_softc *sc)
1424{
1425 struct xlr_gmac_port *self;
1426 xlr_reg_t *base;
1427
1428 base = sc->base;
1429 self = device_get_ivars(sc->nlge_dev);
1430 if (xlr_board_info.is_xls && sc->port_type == XLR_RGMII)
1431 NLGE_UPDATE(base, R_RX_CONTROL, (1 << O_RX_CONTROL__RGMII),
1432 (1 << O_RX_CONTROL__RGMII));
1433
1434 NLGE_UPDATE(base, R_RX_CONTROL, (1 << O_RX_CONTROL__RxEnable),
1435 (1 << O_RX_CONTROL__RxEnable));
1436 NLGE_UPDATE(base, R_TX_CONTROL,
1437 (1 << O_TX_CONTROL__TxEnable | RGE_TX_THRESHOLD_BYTES),
1438 (1 << O_TX_CONTROL__TxEnable | 0x3fff));
1439 switch (sc->port_type) {
1440 case XLR_RGMII:
1441 case XLR_SGMII:
1442 NLGE_UPDATE(base, R_MAC_CONFIG_1,
1443 ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen)),
1444 ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen)));
1445 break;
1446 case XLR_XGMII:
1447 case XLR_XAUI:
1448 NLGE_UPDATE(base, R_XGMAC_CONFIG_1,
1449 ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen)),
1450 ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen)));
1451 break;
1452 default:
1453 panic("Unknown MAC type on port %d\n", sc->id);
1454 }
1455}
1456
1457static void
1458nlge_sgmii_init(struct nlge_softc *sc)
1459{
1460 xlr_reg_t *mmio_gpio;
1461 int i;
1462 int phy;
1463
1464 if (sc->port_type != XLR_SGMII)
1465 return;
1466
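	/*
	 * The register-26 writes below program the SGMII SerDes lane/PLL
	 * parameters; the values are magic numbers, presumably taken from
	 * RMI reference code, and the empty for loops that follow are
	 * crude settle delays.
	 */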
1467 nlge_mii_write_internal(sc->serdes_addr, 26, 0, 0x6DB0);
1468 nlge_mii_write_internal(sc->serdes_addr, 26, 1, 0xFFFF);
1469 nlge_mii_write_internal(sc->serdes_addr, 26, 2, 0xB6D0);
1470 nlge_mii_write_internal(sc->serdes_addr, 26, 3, 0x00FF);
1471 nlge_mii_write_internal(sc->serdes_addr, 26, 4, 0x0000);
1472 nlge_mii_write_internal(sc->serdes_addr, 26, 5, 0x0000);
1473 nlge_mii_write_internal(sc->serdes_addr, 26, 6, 0x0005);
1474 nlge_mii_write_internal(sc->serdes_addr, 26, 7, 0x0001);
1475 nlge_mii_write_internal(sc->serdes_addr, 26, 8, 0x0000);
1476 nlge_mii_write_internal(sc->serdes_addr, 26, 9, 0x0000);
1477 nlge_mii_write_internal(sc->serdes_addr, 26,10, 0x0000);
1478
1479	for (i = 0; i < 10000000; i++) {}	/* delay */
1480 /* program GPIO values for serdes init parameters */
1481 mmio_gpio = (xlr_reg_t *) (DEFAULT_XLR_IO_BASE + XLR_IO_GPIO_OFFSET);
1482 mmio_gpio[0x20] = 0x7e6802;
1483 mmio_gpio[0x10] = 0x7104;
1484	for (i = 0; i < 100000000; i++) {}
1485
1486 /* enable autoneg - more magic */
1487 phy = sc->phy_addr % 4 + 27;
1488 nlge_mii_write_internal(sc->pcs_addr, phy, 0, 0x1000);
1489 DELAY(100000);
1490 nlge_mii_write_internal(sc->pcs_addr, phy, 0, 0x0200);
1491 DELAY(100000);
1492}
1493
1494static void
1495nlge_intr(void *arg)
1496{
1497 struct nlge_port_set *pset;
1498 struct nlge_softc *sc;
1499 struct nlge_softc *port_sc;
1500 xlr_reg_t *base;
1501 uint32_t intreg;
1502 uint32_t intr_status;
1503 int i;
1504
1505 sc = arg;
1506 if (sc == NULL) {
1507 printf("warning: No port registered for interrupt\n");
1508 return;
1509 }
1510 base = sc->base;
1511
1512 intreg = NLGE_READ(base, R_INTREG);
1513 if (intreg & (1 << O_INTREG__MDInt)) {
1514 pset = sc->mdio_pset;
1515 if (pset == NULL) {
1516 printf("warning: No ports for MDIO interrupt\n");
1517 return;
1518 }
1519 for (i = 0; i < pset->vec_sz; i++) {
1520 port_sc = pset->port_vec[i];
1521
1522 if (port_sc == NULL)
1523 continue;
1524
1525			/* Ack the PHY interrupt - it is cleared on read */
1526 intr_status = nlge_mii_read_internal(port_sc->mii_base,
1527 port_sc->phy_addr, 26);
1528 PDEBUG("Phy_%d: int_status=0x%08x\n", port_sc->phy_addr,
1529 intr_status);
1530
1531 if (!(intr_status & 0x8000)) {
1532 /* no interrupt for this port */
1533 continue;
1534 }
1535
1536 if (intr_status & 0x2410) {
1537 /* update link status for port */
1538 nlge_gmac_config_speed(port_sc, 0);
1539 } else {
1540 printf("%s: Unsupported phy interrupt"
1541 " (0x%08x)\n",
1542 device_get_nameunit(port_sc->nlge_dev),
1543 intr_status);
1544 }
1545 }
1546 }
1547
1548 /* Clear the NA interrupt */
1549 xlr_write_reg(base, R_INTREG, 0xffffffff);
1550
1551 return;
1552}
1553
1554static int
1555nlge_irq_init(struct nlge_softc *sc)
1556{
1557 struct resource irq_res;
1558 struct nlna_softc *na_sc;
1559 struct xlr_gmac_block_t *block_info;
1560 device_t na_dev;
1561 int ret;
1562 int irq_num;
1563
1564 na_dev = device_get_parent(sc->nlge_dev);
1565 block_info = device_get_ivars(na_dev);
1566
1567 irq_num = block_info->baseirq + sc->instance;
1568 irq_res.__r_i = (struct resource_i *)(intptr_t) (irq_num);
1569 ret = bus_setup_intr(sc->nlge_dev, &irq_res, (INTR_FAST |
1570 INTR_TYPE_NET | INTR_MPSAFE), NULL, nlge_intr, sc, NULL);
1571 if (ret) {
1572 nlge_detach(sc->nlge_dev);
1573 device_printf(sc->nlge_dev, "couldn't set up irq: error=%d\n",
1574 ret);
1575 return (ENXIO);
1576 }
1577 PDEBUG("Setup intr for dev=%s, irq=%d\n",
1578 device_get_nameunit(sc->nlge_dev), irq_num);
1579
1580 if (sc->instance == 0) {
1581 na_sc = device_get_softc(na_dev);
1582 sc->mdio_pset = &na_sc->mdio_set;
1583 }
1584 return (0);
1585}
1586
1587static void
1588nlge_irq_fini(struct nlge_softc *sc)
1589{
1590}
1591
1592static void
1593nlge_hw_init(struct nlge_softc *sc)
1594{
1595 struct xlr_gmac_port *port_info;
1596 xlr_reg_t *base;
1597
1598 base = sc->base;
1599 port_info = device_get_ivars(sc->nlge_dev);
1600 sc->tx_bucket_id = port_info->tx_bucket_id;
1601
1602 /* each packet buffer is 1536 bytes */
1603 NLGE_WRITE(base, R_DESC_PACK_CTRL,
1604 (1 << O_DESC_PACK_CTRL__MaxEntry) |
1605 (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));
1606 NLGE_WRITE(base, R_STATCTRL, ((1 << O_STATCTRL__Sten) |
1607 (1 << O_STATCTRL__ClrCnt)));
1608 NLGE_WRITE(base, R_L2ALLOCCTRL, 0xffffffff);
1609 NLGE_WRITE(base, R_INTMASK, 0);
1610 nlge_set_mac_addr(sc);
1611 nlge_media_specific_init(sc);
1612}
1613
1614static void
1615nlge_sc_init(struct nlge_softc *sc, device_t dev,
1616 struct xlr_gmac_port *port_info)
1617{
1618 memset(sc, 0, sizeof(*sc));
1619 sc->nlge_dev = dev;
1620 sc->id = device_get_unit(dev);
1621 nlge_set_port_attribs(sc, port_info);
1622}
1623
1624static void
1625nlge_media_specific_init(struct nlge_softc *sc)
1626{
1627 struct mii_data *media;
1628 struct bucket_size *bucket_sizes;
1629
1630 bucket_sizes = xlr_board_info.bucket_sizes;
1631 switch (sc->port_type) {
1632 case XLR_RGMII:
1633 case XLR_SGMII:
1634 case XLR_XAUI:
1635 NLGE_UPDATE(sc->base, R_DESC_PACK_CTRL,
1636 (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset),
1637 (W_DESC_PACK_CTRL__ByteOffset <<
1638 O_DESC_PACK_CTRL__ByteOffset));
1639 NLGE_WRITE(sc->base, R_GMAC_TX0_BUCKET_SIZE + sc->instance,
1640 bucket_sizes->bucket[sc->tx_bucket_id]);
1641 if (sc->port_type != XLR_XAUI) {
1642 nlge_gmac_config_speed(sc, 1);
1643 if (sc->mii_bus) {
1644 media = (struct mii_data *)device_get_softc(
1645 sc->mii_bus);
1646 }
1647 }
1648 break;
1649
1650 case XLR_XGMII:
1651 NLGE_WRITE(sc->base, R_BYTEOFFSET0, 0x2);
1652 NLGE_WRITE(sc->base, R_XGMACPADCALIBRATION, 0x30);
1653 NLGE_WRITE(sc->base, R_XGS_TX0_BUCKET_SIZE,
1654 bucket_sizes->bucket[sc->tx_bucket_id]);
1655 break;
1656 default:
1657 break;
1658 }
1659}
1660
1661/*
1662 * Read the MAC address from the XLR boot registers. All port addresses
1663 * are identical except for the lowest octet.
1664 */
1665static void
1666nlge_read_mac_addr(struct nlge_softc *sc)
1667{
1668 int i, j;
1669
1670 for (i = 0, j = 40; i < ETHER_ADDR_LEN && j >= 0; i++, j-= 8)
1671 sc->dev_addr[i] = (xlr_boot1_info.mac_addr >> j) & 0xff;
1672
1673 sc->dev_addr[i - 1] += sc->id; /* last octet is port-specific */
1674}
1675
1676/*
1677 * Write the MAC address to the XLR MAC port. Also, set the address
1678 * masks and MAC filter configuration.
1679 */
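/*
 * For example (packing inferred from the writes below), the address
 * 00:0a:0b:0c:0d:0e is programmed as MAC_ADDR0 word 0 = 0x0e0d0c0b and
 * word 1 = 0x0a000000.
 */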
1680static void
1681nlge_set_mac_addr(struct nlge_softc *sc)
1682{
1683 NLGE_WRITE(sc->base, R_MAC_ADDR0,
1684 ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16) |
1685 (sc->dev_addr[3] << 8) | (sc->dev_addr[2])));
1686 NLGE_WRITE(sc->base, R_MAC_ADDR0 + 1,
1687 ((sc->dev_addr[1] << 24) | (sc-> dev_addr[0] << 16)));
1688
1689 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK2, 0xffffffff);
1690 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
1691 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK3, 0xffffffff);
1692 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
1693
1694 NLGE_WRITE(sc->base, R_MAC_FILTER_CONFIG,
1695 (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1696 (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1697 (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));
1698
1699 if (sc->port_type == XLR_RGMII || sc->port_type == XLR_SGMII) {
1700 NLGE_UPDATE(sc->base, R_IPG_IFG, MAC_B2B_IPG, 0x7f);
1701 }
1702}
1703
1704static int
1705nlge_if_init(struct nlge_softc *sc)
1706{
1707 struct ifnet *ifp;
1708 device_t dev;
1709 int error;
1710
1711 error = 0;
1712 dev = sc->nlge_dev;
1713 NLGE_LOCK_INIT(sc, device_get_nameunit(dev));
1714
1715 ifp = sc->nlge_if = if_alloc(IFT_ETHER);
1716 if (ifp == NULL) {
1717 device_printf(dev, "can not if_alloc()\n");
1718 error = ENOSPC;
1719 goto fail;
1720 }
1721 ifp->if_softc = sc;
1722 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1723 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1724 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;
1725 ifp->if_capenable = ifp->if_capabilities;
1726 ifp->if_ioctl = nlge_ioctl;
1727 ifp->if_start = nlge_start;
1728 ifp->if_init = nlge_init;
1729 ifp->if_hwassist = 0;
1730 ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1731 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1732 IFQ_SET_READY(&ifp->if_snd);
1733
1734 ifmedia_init(&sc->nlge_mii.mii_media, 0, nlge_mediachange,
1735 nlge_mediastatus);
1736 ifmedia_add(&sc->nlge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1737 ifmedia_set(&sc->nlge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1738 sc->nlge_mii.mii_media.ifm_media = sc->nlge_mii.mii_media.ifm_cur->ifm_media;
1739 nlge_read_mac_addr(sc);
1740
1741 ether_ifattach(ifp, sc->dev_addr);
1742
1743fail:
1744 return (error);
1745}
1746
1747static void
1748nlge_mii_init(device_t dev, struct nlge_softc *sc)
1749{
1750 int error;
1751
1752 if (sc->port_type != XLR_XAUI && sc->port_type != XLR_XGMII) {
1753 NLGE_WRITE(sc->mii_base, R_MII_MGMT_CONFIG, 0x07);
1754 }
1755 error = mii_phy_probe(dev, &sc->mii_bus, nlge_mediachange, nlge_mediastatus);
1756 if (error) {
1757 device_printf(dev, "no PHY device found\n");
1758 sc->mii_bus = NULL;
1759 }
1760 if (sc->mii_bus != NULL) {
1761 /*
1762		 * Enable all MDIO interrupts in the PHY. The RX_ER bit seems to
1763		 * get set about once a second in GigE mode; ignore it for now...
1764 */
1765 nlge_mii_write_internal(sc->mii_base, sc->phy_addr, 25,
1766 0xfffffffe);
1767 }
1768}
1769
1770/*
1771 * Read a PHY register.
1772 *
1773 * Input parameters:
1774 * mii_base - Base address of MII
1775 * phyaddr - PHY's address
1776 * regidx - index of register to read
1777 *
1778 * Return value:
1779 * value read, or 0xffffffff if the read timed out
1780 */
1781
1782static int
1783nlge_mii_read_internal(xlr_reg_t *mii_base, int phyaddr, int regidx)
1784{
1785 int i, val;
1786
1787 /* setup the phy reg to be used */
1788 NLGE_WRITE(mii_base, R_MII_MGMT_ADDRESS,
1789 (phyaddr << 8) | (regidx << 0));
1790 /* Issue the read command */
1791 NLGE_WRITE(mii_base, R_MII_MGMT_COMMAND,
1792 (1 << O_MII_MGMT_COMMAND__rstat));
1793
1794 /* poll for the read cycle to complete */
1795 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
1796 if (NLGE_READ(mii_base, R_MII_MGMT_INDICATORS) == 0)
1797 break;
1798 }
1799
1800 /* clear the read cycle */
1801 NLGE_WRITE(mii_base, R_MII_MGMT_COMMAND, 0);
1802
1803 if (i == PHY_STATUS_RETRIES) {
1804 return (0xffffffff);
1805 }
1806
1807 val = NLGE_READ(mii_base, R_MII_MGMT_STATUS);
1808
1809 return (val);
1810}
1811
1812/*
1813 * Write a value to a PHY register.
1814 *
1815 * Input parameters:
1816 * mii_base - Base address of MII
1817 * phyaddr - PHY to use
1818 * regidx - register within the PHY
1819 * regval - data to write to register
1820 *
1821 * Return value:
1822 * nothing
1823 */
1824static void
1825nlge_mii_write_internal(xlr_reg_t *mii_base, int phyaddr, int regidx,
1826 int regval)
1827{
1828 int i;
1829
1830 NLGE_WRITE(mii_base, R_MII_MGMT_ADDRESS,
1831 (phyaddr << 8) | (regidx << 0));
1832
1833 /* Write the data which starts the write cycle */
1834 NLGE_WRITE(mii_base, R_MII_MGMT_WRITE_DATA, regval);
1835
1836 /* poll for the write cycle to complete */
1837 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
1838 if (NLGE_READ(mii_base, R_MII_MGMT_INDICATORS) == 0)
1839 break;
1840 }
1841}
1842
1843/*
1844 * Function to optimize the use of p2d descriptors for the given PDU.
1845 * As it is on the fast path (called during packet transmission), it is
1846 * described in more detail than the initialization functions.
1847 *
1848 * Input: mbuf chain (MC), pointer to fmn message
1849 * Input constraints: None
1850 * Output: FMN message to transmit the data in MC
1851 * Return values: 0 - success
1852 * 1 - MC cannot be handled (see Limitations below)
1853 * 2 - MC cannot be handled presently (maybe worth re-trying)
1854 * Other output: Number of entries filled in the FMN message
1855 *
1856 * Output structure/constraints:
1857 * 1. Max 3 p2d's + 1 zero-len (ZL) p2d with virtual address of MC.
1858 * 2. 3 p2d's + 1 p2p with max 14 p2d's (ZL p2d not required in this case).
1859 * 3. Each p2d points to physically contiguous chunk of data (subject to
1860 * entire MC requiring max 17 p2d's).
1861 * Limitations:
1862 * 1. MC's that require more than 17 p2d's are not handled.
1863 * Benefits: MC's that require <= 3 p2d's avoid the overhead of allocating
1864 * the p2p structure. Small packets (which typically give low
1865 * performance) are expected to have a small MC that takes
1866 * advantage of this.
1867 */
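/*
 * Roughly, for a chain that needs three or fewer p2d's the message ends up
 * as
 *     msg0..msgN   = p2d entries for the fragments
 *     msgN+1       = free-back entry carrying the mbuf chain's virtual
 *                    address
 * while a larger chain uses
 *     msg0..msg2   = p2d entries for the first three fragments
 *     msg3         = p2p entry pointing at an external nlge_tx_desc that
 *                    holds the remaining p2d's.
 */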
1868static int
1869prepare_fmn_message(struct nlge_softc *sc, struct msgrng_msg *fmn_msg,
1870 uint32_t *n_entries, struct mbuf *mbuf_chain, uint64_t fb_stn_id,
1871 struct nlge_tx_desc **tx_desc)
1872{
1873 struct mbuf *m;
1874 struct nlge_tx_desc *p2p;
1875 uint64_t *cur_p2d;
1876 vm_offset_t buf;
1877 vm_paddr_t paddr;
1878 int msg_sz, p2p_sz, is_p2p;
1879 int len, frag_sz;
1880 /* Num entries per FMN msg is 4 for XLR/XLS */
1881 const int FMN_SZ = sizeof(*fmn_msg) / sizeof(uint64_t);
1882
1883 msg_sz = p2p_sz = is_p2p = 0;
1884 p2p = NULL;
1885 cur_p2d = &fmn_msg->msg0;
1886
1887 for (m = mbuf_chain; m != NULL; m = m->m_next) {
1888 buf = (vm_offset_t) m->m_data;
1889 len = m->m_len;
1890
1891 while (len) {
1892 if (msg_sz == (FMN_SZ - 1)) {
1893 p2p = uma_zalloc(nl_tx_desc_zone, M_NOWAIT);
1894 if (p2p == NULL) {
1895 return 2;
1896 }
1897 /*
1880 /*
1898 * As we currently use xlr_paddr_lw on a 32-bit
1899 * OS, both the pointers are laid out in one
1900 * 64-bit location - this makes it easy to
1901 * retrieve the pointers when processing the
1902 * tx free-back descriptor.
1881 * Save the virtual address in the descriptor,
1882 * it makes freeing easy.
1903 */
1904 p2p->frag[XLR_MAX_TX_FRAGS] =
1883 */
1884 p2p->frag[XLR_MAX_TX_FRAGS] =
1905 (((uint64_t) (vm_offset_t) p2p) << 32) |
1906 ((vm_offset_t) mbuf_chain);
1885 (uint64_t)(vm_offset_t)p2p;
1907 cur_p2d = &p2p->frag[0];
1908 is_p2p = 1;
1909 } else if (msg_sz == (FMN_SZ - 2 + XLR_MAX_TX_FRAGS)) {
1910 uma_zfree(nl_tx_desc_zone, p2p);
1911 return 1;
1912 }
1913 paddr = vtophys(buf);
1914 frag_sz = PAGE_SIZE - (buf & PAGE_MASK);
1915 if (len < frag_sz)
1916 frag_sz = len;
1917 *cur_p2d++ = (127ULL << 54) | ((uint64_t)frag_sz << 40)
1918 | paddr;
1919 msg_sz++;
1920 if (is_p2p)
1921 p2p_sz++;
1922 len -= frag_sz;
1923 buf += frag_sz;
1924 }
1925 }
1926
1927 if (msg_sz == 0) {
1928 printf("Zero-length mbuf chain ??\n");
1929 *n_entries = msg_sz ;
1930 return 0;
1931 }
1932
1933 cur_p2d[-1] |= (1ULL << 63); /* set eop in most-recent p2d */
1934 *cur_p2d = (1ULL << 63) | ((uint64_t)fb_stn_id << 54) |
1886 cur_p2d = &p2p->frag[0];
1887 is_p2p = 1;
1888 } else if (msg_sz == (FMN_SZ - 2 + XLR_MAX_TX_FRAGS)) {
1889 uma_zfree(nl_tx_desc_zone, p2p);
1890 return 1;
1891 }
1892 paddr = vtophys(buf);
1893 frag_sz = PAGE_SIZE - (buf & PAGE_MASK);
1894 if (len < frag_sz)
1895 frag_sz = len;
1896 *cur_p2d++ = (127ULL << 54) | ((uint64_t)frag_sz << 40)
1897 | paddr;
1898 msg_sz++;
1899 if (is_p2p)
1900 p2p_sz++;
1901 len -= frag_sz;
1902 buf += frag_sz;
1903 }
1904 }
1905
1906 if (msg_sz == 0) {
1907 printf("Zero-length mbuf chain ??\n");
1908 *n_entries = msg_sz ;
1909 return 0;
1910 }
1911
1912 cur_p2d[-1] |= (1ULL << 63); /* set eop in most-recent p2d */
1913 *cur_p2d = (1ULL << 63) | ((uint64_t)fb_stn_id << 54) |
1935 (vm_offset_t) mbuf_chain;
1914 (vm_offset_t) mbuf_chain; /* XXX: fix 64 bit */
1915 *tx_desc = p2p;
1916
1917 if (is_p2p) {
1918 paddr = vtophys(p2p);
1919 p2p_sz++;
1920 fmn_msg->msg3 = (1ULL << 62) | ((uint64_t)fb_stn_id << 54) |
1921 ((uint64_t)(p2p_sz * 8) << 40) | paddr;
1922 *n_entries = FMN_SZ;
1923 } else {
1924 *n_entries = msg_sz + 1;
1925 }
1926
1927 return (0);
1928}
1929
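/*
 * Illustrative sketch (not part of the driver): prepare_fmn_message() above
 * emits one p2d per physically contiguous piece of the chain, splitting an
 * mbuf's data at page boundaries, and stashes the p2p descriptor's own
 * virtual address in frag[XLR_MAX_TX_FRAGS] so release_tx_desc() can free it
 * later.  Each p2d word built above carries the fragment length at bit 40,
 * the physical address in the low bits and the eop flag in bit 63.  The
 * hypothetical helper below only counts the pieces the same way the loop
 * above does; it is illustrative and deliberately not compiled.
 */
#if 0
static int
nlge_example_count_p2ds(struct mbuf *mbuf_chain)
{
	struct mbuf *m;
	vm_offset_t buf;
	int len, frag_sz, n_p2ds;

	n_p2ds = 0;
	for (m = mbuf_chain; m != NULL; m = m->m_next) {
		buf = (vm_offset_t)m->m_data;
		for (len = m->m_len; len > 0; len -= frag_sz, buf += frag_sz) {
			/* Split at page boundaries, as the loop above does. */
			frag_sz = PAGE_SIZE - (buf & PAGE_MASK);
			if (len < frag_sz)
				frag_sz = len;
			n_p2ds++;
		}
	}
	/* <= 3: inline p2d's only; 4..17: p2p path; > 17: not handled. */
	return (n_p2ds);
}
#endif
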
1930static int
1931send_fmn_msg_tx(struct nlge_softc *sc, struct msgrng_msg *msg,
1932 uint32_t n_entries)
1933{
1934 uint32_t msgrng_flags;
1935 int ret;
1936#ifdef INVARIANTS
1937 int i = 0;
1938#endif
1939
1940 do {
1941 msgrng_flags = msgrng_access_enable();
1942 ret = message_send(n_entries, MSGRNG_CODE_MAC,
1943 sc->tx_bucket_id, msg);
1944 msgrng_restore(msgrng_flags);
1945 KASSERT(i++ < 100000, ("Too many credit fails\n"));
1946 } while (ret != 0);
1947 return (0);
1948}
1949
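/*
 * Illustrative sketch (not part of the driver): how the two helpers above
 * fit together on a transmit path.  The caller name and the fb_stn_id
 * argument are hypothetical placeholders; the driver's own callers are not
 * reproduced here.
 */
#if 0
static int
nlge_example_xmit(struct nlge_softc *sc, struct mbuf *m, uint64_t fb_stn_id)
{
	struct msgrng_msg msg;
	struct nlge_tx_desc *tx_desc;
	uint32_t n_entries;

	/* Build the FMN message (p2d's, optional p2p, free-back entry). */
	if (prepare_fmn_message(sc, &msg, &n_entries, m, fb_stn_id,
	    &tx_desc) != 0)
		return (ENOBUFS);	/* too fragmented, or no descriptor */
	/* Hand the message to the MAC's transmit bucket. */
	return (send_fmn_msg_tx(sc, &msg, n_entries));
}
#endif
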
1950static void
1951release_tx_desc(vm_paddr_t paddr)
1952{
1953 struct nlge_tx_desc *tx_desc;
1954 uint32_t sr;
1955 uint64_t vaddr;
1956
1957 paddr += (XLR_MAX_TX_FRAGS * sizeof(uint64_t));
1958 sr = xlr_enable_kx();
1959 vaddr = xlr_paddr_ld(paddr);
1960 xlr_restore_kx(sr);
1961
1962 tx_desc = (struct nlge_tx_desc*)(intptr_t)vaddr;
1963 uma_zfree(nl_tx_desc_zone, tx_desc);
1964 }
1965
1966 static void *
1967 get_buf(void)
1968 {
1969 struct mbuf *m_new;
1970 uint64_t *md;
1971 #ifdef INVARIANTS
1972 vm_paddr_t temp1, temp2;
1973 #endif
1974
1975 if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
1976 return (NULL);
1977 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1978 m_adj(m_new, XLR_CACHELINE_SIZE - ((unsigned int)m_new->m_data & 0x1f));
1979 md = (uint64_t *)m_new->m_data;
1980 md[0] = (intptr_t)m_new; /* Back Ptr */
1981 md[1] = 0xf00bad;
1982 m_adj(m_new, XLR_CACHELINE_SIZE);
1983
1984 #ifdef INVARIANTS
1985 temp1 = vtophys((vm_offset_t) m_new->m_data);
1986 temp2 = vtophys((vm_offset_t) m_new->m_data + 1536);
1987 if ((temp1 + 1536) != temp2)
1988 panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
1989 #endif
1990
1991 return ((void *)m_new->m_data);
1992}
1993
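/*
 * Illustrative sketch (not part of the driver): get_buf() above hides two
 * 64-bit metadata words at the start of the cluster - a back pointer to the
 * mbuf and a 0xf00bad marker - and returns a data pointer advanced past
 * them.  Assuming XLR_CACHELINE_SIZE bytes were skipped, a hypothetical
 * consumer of a free-back buffer could recover the mbuf as shown; the
 * driver's own receive path is not reproduced here.
 */
#if 0
static struct mbuf *
nlge_example_buf_to_mbuf(void *buf)
{
	uint64_t *md;

	md = (uint64_t *)((char *)buf - XLR_CACHELINE_SIZE);
	return ((struct mbuf *)(intptr_t)md[0]);	/* Back Ptr */
}
#endif
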
1994static int
1995nlge_gmac_config_speed(struct nlge_softc *sc, int quick)
1996{
1997 struct mii_data *md;
1998 xlr_reg_t *mmio;
1999 int bmsr, n_tries, max_tries;
2000 int core_ctl[] = { 0x2, 0x1, 0x0, 0x1 };
2001 int sgmii_speed[] = { SGMII_SPEED_10,
2002 SGMII_SPEED_100,
2003 SGMII_SPEED_1000,
2004 SGMII_SPEED_100 }; /* default to 100Mbps */
2005 char *speed_str[] = { "10",
2006 "100",
2007 "1000",
2008 "unknown, defaulting to 100" };
2009 int link_state = LINK_STATE_DOWN;
2010
2011 if (sc->port_type == XLR_XAUI || sc->port_type == XLR_XGMII)
2012 return 0;
2013
2014 md = NULL;
2015 mmio = sc->base;
2016 if (sc->mii_base != NULL) {
2017 max_tries = (quick == 1) ? 100 : 4000;
2018 bmsr = 0;
2019 for (n_tries = 0; n_tries < max_tries; n_tries++) {
2020 bmsr = nlge_mii_read_internal(sc->mii_base,
2021 sc->phy_addr, MII_BMSR);
2022 if ((bmsr & BMSR_ACOMP) && (bmsr & BMSR_LINK))
2023 break; /* Auto-negotiation is complete
2024 and link is up */
2025 DELAY(1000);
2026 }
2027 bmsr &= BMSR_LINK;
2028 sc->link = (bmsr == 0) ? xlr_mac_link_down : xlr_mac_link_up;
2029 sc->speed = nlge_mii_read_internal(sc->mii_base, sc->phy_addr, 28);
2030 sc->speed = (sc->speed >> 3) & 0x03;
2031 if (sc->link == xlr_mac_link_up) {
2032 link_state = LINK_STATE_UP;
2033 nlge_sgmii_init(sc);
2034 }
2035 if (sc->mii_bus)
2036 md = (struct mii_data *)device_get_softc(sc->mii_bus);
2037 }
2038
2039 if (sc->port_type != XLR_RGMII)
2040 NLGE_WRITE(mmio, R_INTERFACE_CONTROL, sgmii_speed[sc->speed]);
2041 if (sc->speed == xlr_mac_speed_10 || sc->speed == xlr_mac_speed_100 ||
2042 sc->speed == xlr_mac_speed_rsvd) {
2043 NLGE_WRITE(mmio, R_MAC_CONFIG_2, 0x7117);
2044 } else if (sc->speed == xlr_mac_speed_1000) {
2045 NLGE_WRITE(mmio, R_MAC_CONFIG_2, 0x7217);
2046 if (md != NULL) {
2047 ifmedia_set(&md->mii_media, IFM_MAKEWORD(IFM_ETHER,
2048 IFM_1000_T, IFM_FDX, md->mii_instance));
2049 }
2050 }
2051 NLGE_WRITE(mmio, R_CORECONTROL, core_ctl[sc->speed]);
2052 if_link_state_change(sc->nlge_if, link_state);
2053 printf("%s: [%sMbps]\n", device_get_nameunit(sc->nlge_dev),
2054 speed_str[sc->speed]);
2055
2056 return (0);
2057}
2058
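/*
 * Illustrative note: nlge_gmac_config_speed() above derives sc->speed from
 * bits [4:3] of PHY register 28 (0x1c) and uses it to index the tables at
 * the top of the function: 0 -> 10 Mbps, 1 -> 100 Mbps, 2 -> 1000 Mbps,
 * and 3 -> reserved, which the tables treat as "unknown, defaulting to 100".
 */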
2059/*
2060 * This function is called for each port that was added to the device tree
2061 * and it initializes the following port attributes:
2062 * - type
2063 * - base (base address to access port-specific registers)
2064 * - mii_base
2065 * - phy_addr
2066 */
2067static void
2068nlge_set_port_attribs(struct nlge_softc *sc,
2069 struct xlr_gmac_port *port_info)
2070{
2071 sc->instance = port_info->instance % 4; /* TBD: will not work for SPI-4 */
2072 sc->port_type = port_info->type;
2073 sc->base = (xlr_reg_t *) (port_info->base_addr +
2074 (uint32_t)DEFAULT_XLR_IO_BASE);
2075 sc->mii_base = (xlr_reg_t *) (port_info->mii_addr +
2076 (uint32_t)DEFAULT_XLR_IO_BASE);
2077 if (port_info->pcs_addr != 0)
2078 sc->pcs_addr = (xlr_reg_t *) (port_info->pcs_addr +
2079 (uint32_t)DEFAULT_XLR_IO_BASE);
2080 if (port_info->serdes_addr != 0)
2081 sc->serdes_addr = (xlr_reg_t *) (port_info->serdes_addr +
2082 (uint32_t)DEFAULT_XLR_IO_BASE);
2083 sc->phy_addr = port_info->phy_addr;
2084
2085 PDEBUG("Port%d: base=%p, mii_base=%p, phy_addr=%d\n", sc->id, sc->base,
2086 sc->mii_base, sc->phy_addr);
2087}
2088
2089/* ------------------------------------------------------------------------ */
2090
2091/* Debug dump functions */
2092
2093#ifdef DEBUG
2094
2095static void
2096dump_reg(xlr_reg_t *base, uint32_t offset, char *name)
2097{
2098 int val;
2099
2100 val = NLGE_READ(base, offset);
2101 printf("%-30s: 0x%8x 0x%8x\n", name, offset, val);
2102}
2103
2104#define STRINGIFY(x) #x
2105
2106static void
2107dump_na_registers(xlr_reg_t *base_addr, int port_id)
2108{
2109 PDEBUG("Register dump for NA (of port=%d)\n", port_id);
2110 dump_reg(base_addr, R_PARSERCONFIGREG, STRINGIFY(R_PARSERCONFIGREG));
2111 PDEBUG("Tx bucket sizes\n");
2112 dump_reg(base_addr, R_GMAC_JFR0_BUCKET_SIZE,
2113 STRINGIFY(R_GMAC_JFR0_BUCKET_SIZE));
2114 dump_reg(base_addr, R_GMAC_RFR0_BUCKET_SIZE,
2115 STRINGIFY(R_GMAC_RFR0_BUCKET_SIZE));
2116 dump_reg(base_addr, R_GMAC_TX0_BUCKET_SIZE,
2117 STRINGIFY(R_GMAC_TX0_BUCKET_SIZE));
2118 dump_reg(base_addr, R_GMAC_TX1_BUCKET_SIZE,
2119 STRINGIFY(R_GMAC_TX1_BUCKET_SIZE));
2120 dump_reg(base_addr, R_GMAC_TX2_BUCKET_SIZE,
2121 STRINGIFY(R_GMAC_TX2_BUCKET_SIZE));
2122 dump_reg(base_addr, R_GMAC_TX3_BUCKET_SIZE,
2123 STRINGIFY(R_GMAC_TX3_BUCKET_SIZE));
2124 dump_reg(base_addr, R_GMAC_JFR1_BUCKET_SIZE,
2125 STRINGIFY(R_GMAC_JFR1_BUCKET_SIZE));
2126 dump_reg(base_addr, R_GMAC_RFR1_BUCKET_SIZE,
2127 STRINGIFY(R_GMAC_RFR1_BUCKET_SIZE));
2128 dump_reg(base_addr, R_TXDATAFIFO0, STRINGIFY(R_TXDATAFIFO0));
2129 dump_reg(base_addr, R_TXDATAFIFO1, STRINGIFY(R_TXDATAFIFO1));
2130}
2131
2132static void
2133dump_gmac_registers(struct nlge_softc *sc)
2134{
2135 xlr_reg_t *base_addr = sc->base;
2136 int port_id = sc->instance;
2137
2138 PDEBUG("Register dump for port=%d\n", port_id);
2139 if (sc->port_type == XLR_RGMII || sc->port_type == XLR_SGMII) {
2140 dump_reg(base_addr, R_MAC_CONFIG_1, STRINGIFY(R_MAC_CONFIG_1));
2141 dump_reg(base_addr, R_MAC_CONFIG_2, STRINGIFY(R_MAC_CONFIG_2));
2142 dump_reg(base_addr, R_IPG_IFG, STRINGIFY(R_IPG_IFG));
2143 dump_reg(base_addr, R_HALF_DUPLEX, STRINGIFY(R_HALF_DUPLEX));
2144 dump_reg(base_addr, R_MAXIMUM_FRAME_LENGTH,
2145 STRINGIFY(R_MAXIMUM_FRAME_LENGTH));
2146 dump_reg(base_addr, R_TEST, STRINGIFY(R_TEST));
2147 dump_reg(base_addr, R_MII_MGMT_CONFIG,
2148 STRINGIFY(R_MII_MGMT_CONFIG));
2149 dump_reg(base_addr, R_MII_MGMT_COMMAND,
2150 STRINGIFY(R_MII_MGMT_COMMAND));
2151 dump_reg(base_addr, R_MII_MGMT_ADDRESS,
2152 STRINGIFY(R_MII_MGMT_ADDRESS));
2153 dump_reg(base_addr, R_MII_MGMT_WRITE_DATA,
2154 STRINGIFY(R_MII_MGMT_WRITE_DATA));
2155 dump_reg(base_addr, R_MII_MGMT_STATUS,
2156 STRINGIFY(R_MII_MGMT_STATUS));
2157 dump_reg(base_addr, R_MII_MGMT_INDICATORS,
2158 STRINGIFY(R_MII_MGMT_INDICATORS));
2159 dump_reg(base_addr, R_INTERFACE_CONTROL,
2160 STRINGIFY(R_INTERFACE_CONTROL));
2161 dump_reg(base_addr, R_INTERFACE_STATUS,
2162 STRINGIFY(R_INTERFACE_STATUS));
2163 } else if (sc->port_type == XLR_XAUI || sc->port_type == XLR_XGMII) {
2164 dump_reg(base_addr, R_XGMAC_CONFIG_0,
2165 STRINGIFY(R_XGMAC_CONFIG_0));
2166 dump_reg(base_addr, R_XGMAC_CONFIG_1,
2167 STRINGIFY(R_XGMAC_CONFIG_1));
2168 dump_reg(base_addr, R_XGMAC_CONFIG_2,
2169 STRINGIFY(R_XGMAC_CONFIG_2));
2170 dump_reg(base_addr, R_XGMAC_CONFIG_3,
2171 STRINGIFY(R_XGMAC_CONFIG_3));
2172 dump_reg(base_addr, R_XGMAC_STATION_ADDRESS_LS,
2173 STRINGIFY(R_XGMAC_STATION_ADDRESS_LS));
2174 dump_reg(base_addr, R_XGMAC_STATION_ADDRESS_MS,
2175 STRINGIFY(R_XGMAC_STATION_ADDRESS_MS));
2176 dump_reg(base_addr, R_XGMAC_MAX_FRAME_LEN,
2177 STRINGIFY(R_XGMAC_MAX_FRAME_LEN));
2178 dump_reg(base_addr, R_XGMAC_REV_LEVEL,
2179 STRINGIFY(R_XGMAC_REV_LEVEL));
2180 dump_reg(base_addr, R_XGMAC_MIIM_COMMAND,
2181 STRINGIFY(R_XGMAC_MIIM_COMMAND));
2182 dump_reg(base_addr, R_XGMAC_MIIM_FILED,
2183 STRINGIFY(R_XGMAC_MIIM_FILED));
2184 dump_reg(base_addr, R_XGMAC_MIIM_CONFIG,
2185 STRINGIFY(R_XGMAC_MIIM_CONFIG));
2186 dump_reg(base_addr, R_XGMAC_MIIM_LINK_FAIL_VECTOR,
2187 STRINGIFY(R_XGMAC_MIIM_LINK_FAIL_VECTOR));
2188 dump_reg(base_addr, R_XGMAC_MIIM_INDICATOR,
2189 STRINGIFY(R_XGMAC_MIIM_INDICATOR));
2190 }
2191
2192 dump_reg(base_addr, R_MAC_ADDR0, STRINGIFY(R_MAC_ADDR0));
2193 dump_reg(base_addr, R_MAC_ADDR0 + 1, STRINGIFY(R_MAC_ADDR0+1));
2194 dump_reg(base_addr, R_MAC_ADDR1, STRINGIFY(R_MAC_ADDR1));
2195 dump_reg(base_addr, R_MAC_ADDR2, STRINGIFY(R_MAC_ADDR2));
2196 dump_reg(base_addr, R_MAC_ADDR3, STRINGIFY(R_MAC_ADDR3));
2197 dump_reg(base_addr, R_MAC_ADDR_MASK2, STRINGIFY(R_MAC_ADDR_MASK2));
2198 dump_reg(base_addr, R_MAC_ADDR_MASK3, STRINGIFY(R_MAC_ADDR_MASK3));
2199 dump_reg(base_addr, R_MAC_FILTER_CONFIG, STRINGIFY(R_MAC_FILTER_CONFIG));
2200 dump_reg(base_addr, R_TX_CONTROL, STRINGIFY(R_TX_CONTROL));
2201 dump_reg(base_addr, R_RX_CONTROL, STRINGIFY(R_RX_CONTROL));
2202 dump_reg(base_addr, R_DESC_PACK_CTRL, STRINGIFY(R_DESC_PACK_CTRL));
2203 dump_reg(base_addr, R_STATCTRL, STRINGIFY(R_STATCTRL));
2204 dump_reg(base_addr, R_L2ALLOCCTRL, STRINGIFY(R_L2ALLOCCTRL));
2205 dump_reg(base_addr, R_INTMASK, STRINGIFY(R_INTMASK));
2206 dump_reg(base_addr, R_INTREG, STRINGIFY(R_INTREG));
2207 dump_reg(base_addr, R_TXRETRY, STRINGIFY(R_TXRETRY));
2208 dump_reg(base_addr, R_CORECONTROL, STRINGIFY(R_CORECONTROL));
2209 dump_reg(base_addr, R_BYTEOFFSET0, STRINGIFY(R_BYTEOFFSET0));
2210 dump_reg(base_addr, R_BYTEOFFSET1, STRINGIFY(R_BYTEOFFSET1));
2211 dump_reg(base_addr, R_L2TYPE_0, STRINGIFY(R_L2TYPE_0));
2212 dump_na_registers(base_addr, port_id);
2213}
2214
2215static void
2216dump_fmn_cpu_credits_for_gmac(struct xlr_board_info *board, int gmac_id)
2217{
2218 struct stn_cc *cc;
2219 int gmac_bucket_ids[] = { 97, 98, 99, 100, 101, 103 };
2220 int j, k, r, c;
2221 int n_gmac_buckets;
2222
2223 n_gmac_buckets = sizeof (gmac_bucket_ids) / sizeof (gmac_bucket_ids[0]);
2224 for (j = 0; j < 8; j++) { // for each cpu
2225 cc = board->credit_configs[j];
2226 printf("Credits for Station CPU_%d ---> GMAC buckets (tx path)\n", j);
2227 for (k = 0; k < n_gmac_buckets; k++) {
2228 r = gmac_bucket_ids[k] / 8;
2229 c = gmac_bucket_ids[k] % 8;
2230 printf (" --> gmac%d_bucket_%-3d: credits=%d\n", gmac_id,
2231 gmac_bucket_ids[k], cc->counters[r][c]);
2232 }
2233 }
2234}
2235
2236static void
2237dump_fmn_gmac_credits(struct xlr_board_info *board, int gmac_id)
2238{
2239 struct stn_cc *cc;
2240 int j, k;
2241
2242 cc = board->gmac_block[gmac_id].credit_config;
2243 printf("Credits for Station: GMAC_%d ---> CPU buckets (rx path)\n", gmac_id);
2244 for (j = 0; j < 8; j++) { // for each cpu
2245 printf(" ---> cpu_%d\n", j);
2246 for (k = 0; k < 8; k++) { // for each bucket in cpu
2247 printf(" ---> bucket_%d: credits=%d\n", j * 8 + k,
2248 cc->counters[j][k]);
2249 }
2250 }
2251}
2252
2253static void
2254dump_board_info(struct xlr_board_info *board)
2255{
2256 struct xlr_gmac_block_t *gm;
2257 int i, k;
2258
2259 printf("cpu=%x ", xlr_revision());
2260 printf("board_version: major=%llx, minor=%llx\n",
2261 xlr_boot1_info.board_major_version,
2262 xlr_boot1_info.board_minor_version);
2263 printf("is_xls=%d, nr_cpus=%d, usb=%s, cfi=%s, ata=%s\npci_irq=%d,"
2264 "gmac_ports=%d\n", board->is_xls, board->nr_cpus,
2265 board->usb ? "Yes" : "No", board->cfi ? "Yes": "No",
2266 board->ata ? "Yes" : "No", board->pci_irq, board->gmacports);
2267 printf("FMN: Core-station bucket sizes\n");
2268 for (i = 0; i < 128; i++) {
2269 if (i && ((i % 16) == 0))
2270 printf("\n");
2271 printf ("b[%d] = %d ", i, board->bucket_sizes->bucket[i]);
2272 }
2273 printf("\n");
2274 for (i = 0; i < 3; i++) {
2275 gm = &board->gmac_block[i];
2276 printf("RNA_%d: type=%d, enabled=%s, mode=%d, station_id=%d,"
2277 "station_txbase=%d, station_rfr=%d ", i, gm->type,
2278 gm->enabled ? "Yes" : "No", gm->mode, gm->station_id,
2279 gm->station_txbase, gm->station_rfr);
2280 printf("n_ports=%d, baseaddr=%p, baseirq=%d, baseinst=%d\n",
2281 gm->num_ports, (xlr_reg_t *)gm->baseaddr, gm->baseirq,
2282 gm->baseinst);
2283 }
2284 for (k = 0; k < 3; k++) { // for each NA
2285 dump_fmn_cpu_credits_for_gmac(board, k);
2286 dump_fmn_gmac_credits(board, k);
2287 }
2288}
2289
2290static void
2291dump_mac_stats(struct nlge_softc *sc)
2292{
2293 xlr_reg_t *addr;
2294 uint32_t pkts_tx, pkts_rx;
2295
2296 addr = sc->base;
2297 pkts_rx = NLGE_READ(sc->base, R_RPKT);
2298 pkts_tx = NLGE_READ(sc->base, R_TPKT);
2299
2300 printf("[nlge_%d mac stats]: pkts_tx=%u, pkts_rx=%u\n", sc->id, pkts_tx,
2301 pkts_rx);
2302 if (pkts_rx > 0) {
2303 uint32_t r;
2304
2305 /* dump all rx counters. we need this because pkts_rx includes
2306 bad packets. */
2307 for (r = R_RFCS; r <= R_ROVR; r++)
2308 printf("[nlge_%d mac stats]: [0x%x]=%u\n", sc->id, r,
2309 NLGE_READ(sc->base, r));
2310 }
2311 if (pkts_tx > 0) {
2312 uint32_t r;
2313
2314 /* dump all tx counters. might be useful for debugging. */
2315 for (r = R_TMCA; r <= R_TFRG; r++) {
2316 if ((r == (R_TNCL + 1)) || (r == (R_TNCL + 2)))
2317 continue;
2318 printf("[nlge_%d mac stats]: [0x%x]=%u\n", sc->id, r,
2319 NLGE_READ(sc->base, r));
2320 }
2321 }
2322
2323}
2324
2325static void
2326dump_mii_regs(struct nlge_softc *sc)
2327{
2328 uint32_t mii_regs[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
2329 0x8, 0x9, 0xa, 0xf, 0x10, 0x11, 0x12, 0x13,
2330 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
2331 0x1c, 0x1d, 0x1e};
2332 int i, n_regs;
2333
2334 if (sc->mii_base == NULL || sc->mii_bus == NULL)
2335 return;
2336
2337 n_regs = sizeof (mii_regs) / sizeof (mii_regs[0]);
2338 for (i = 0; i < n_regs; i++) {
2339 printf("[mii_0x%x] = %x\n", mii_regs[i],
2340 nlge_mii_read_internal(sc->mii_base, sc->phy_addr,
2341 mii_regs[i]));
2342 }
2343}
2344
2345static void
2346dump_ifmedia(struct ifmedia *ifm)
2347{
2348 printf("ifm_mask=%08x, ifm_media=%08x, cur=%p\n", ifm->ifm_mask,
2349 ifm->ifm_media, ifm->ifm_cur);
2350 if (ifm->ifm_cur != NULL) {
2351 printf("Cur attribs: ifmedia_entry.ifm_media=%08x,"
2352 " ifmedia_entry.ifm_data=%08x\n", ifm->ifm_cur->ifm_media,
2353 ifm->ifm_cur->ifm_data);
2354 }
2355}
2356
2357static void
2358dump_mii_data(struct mii_data *mii)
2359{
2360 dump_ifmedia(&mii->mii_media);
2361 printf("ifp=%p, mii_instance=%d, mii_media_status=%08x,"
2362 " mii_media_active=%08x\n", mii->mii_ifp, mii->mii_instance,
2363 mii->mii_media_status, mii->mii_media_active);
2364}
2365
2366static void
2367dump_pcs_regs(struct nlge_softc *sc, int phy)
2368{
2369 int i, val;
2370
2371 printf("PCS regs from %p for phy=%d\n", sc->pcs_addr, phy);
2372 for (i = 0; i < 18; i++) {
2373 if (i == 2 || i == 3 || (i >= 9 && i <= 14))
2374 continue;
2375 val = nlge_mii_read_internal(sc->pcs_addr, phy, i);
2376 printf("PHY:%d pcs[0x%x] is 0x%x\n", phy, i, val);
2377 }
2378}
2379#endif