1/*-
2 * Copyright (c) 2003-2009 RMI Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * RMI_BSD */
30
31/*
 32 * The XLR device supports up to four 10/100/1000 Ethernet MACs and up to
 33 * two 10G Ethernet MACs (XGMII). Alternatively, each 10G port can be used
 34 * as a SPI-4 interface, with 8 ports per such interface. The MACs are
 35 * encapsulated in another hardware block referred to as a network accelerator;
 36 * there are three instances of these in an XLR. One of them controls
 37 * the four 1G RGMII ports while each of the others controls an XGMII port.
 38 * Enabling a MAC requires configuring both the corresponding network
 39 * accelerator and the individual port.
 40 * The XLS device supports up to eight 10/100/1000 Ethernet MACs or at most
 41 * two 10G Ethernet MACs. The 1G MACs use the SGMII interface and the 10G
 42 * MACs use XAUI. These ports are part of two network accelerators.
 43 * The nlge driver configures and initializes the non-SPI4 Ethernet ports of
 44 * the XLR/XLS devices and enables data transfer on them.
45 */
46
47#include <sys/cdefs.h>
48__FBSDID("$FreeBSD: head/sys/mips/rmi/dev/nlge/if_nlge.c 211996 2010-08-30 13:26:07Z jchandra $");
49
50#ifdef HAVE_KERNEL_OPTION_HEADERS
51#include "opt_device_polling.h"
52#endif
53
54#include <sys/endian.h>
55#include <sys/systm.h>
56#include <sys/sockio.h>
57#include <sys/param.h>
58#include <sys/lock.h>
59#include <sys/mutex.h>
60#include <sys/proc.h>
61#include <sys/limits.h>
62#include <sys/bus.h>
63#include <sys/mbuf.h>
64#include <sys/malloc.h>
65#include <sys/kernel.h>
66#include <sys/module.h>
67#include <sys/socket.h>
68#define __RMAN_RESOURCE_VISIBLE
69#include <sys/rman.h>
70#include <sys/taskqueue.h>
71#include <sys/smp.h>
72#include <sys/sysctl.h>
73
74#include <net/if.h>
75#include <net/if_arp.h>
76#include <net/ethernet.h>
77#include <net/if_dl.h>
78#include <net/if_media.h>
79#include <net/bpf.h>
80#include <net/if_types.h>
81#include <net/if_vlan_var.h>
82
83#include <netinet/in_systm.h>
84#include <netinet/in.h>
85#include <netinet/ip.h>
86
87#include <vm/vm.h>
88#include <vm/pmap.h>
89#include <vm/uma.h>
90
91#include <machine/reg.h>
92#include <machine/cpu.h>
93#include <machine/mips_opcode.h>
94#include <machine/asm.h>
95#include <machine/cpuregs.h>
96#include <machine/param.h>
97#include <machine/intr_machdep.h>
98#include <machine/clock.h> /* for DELAY */
99#include <machine/bus.h>
100#include <machine/resource.h>
101
102#include <mips/rmi/interrupt.h>
103#include <mips/rmi/msgring.h>
104#include <mips/rmi/iomap.h>
105#include <mips/rmi/debug.h>
106#include <mips/rmi/pic.h>
107#include <mips/rmi/board.h>
108#include <mips/rmi/rmi_mips_exts.h>
109#include <mips/rmi/rmi_boot_info.h>
110#include <mips/rmi/dev/xlr/atx_cpld.h>
111#include <mips/rmi/dev/xlr/xgmac_mdio.h>
112
113#include <dev/mii/mii.h>
114#include <dev/mii/miivar.h>
115#include "miidevs.h"
116#include <dev/mii/brgphyreg.h>
117#include "miibus_if.h"
118
119#include <mips/rmi/dev/nlge/if_nlge.h>
120
121MODULE_DEPEND(nlna, nlge, 1, 1, 1);
122MODULE_DEPEND(nlge, ether, 1, 1, 1);
123MODULE_DEPEND(nlge, miibus, 1, 1, 1);
124
 125/* Network accelerator entry points */
126static int nlna_probe(device_t);
127static int nlna_attach(device_t);
128static int nlna_detach(device_t);
129static int nlna_suspend(device_t);
130static int nlna_resume(device_t);
131static int nlna_shutdown(device_t);
132
133/* GMAC port entry points */
134static int nlge_probe(device_t);
135static int nlge_attach(device_t);
136static int nlge_detach(device_t);
137static int nlge_suspend(device_t);
138static int nlge_resume(device_t);
139static void nlge_init(void *);
140static int nlge_ioctl(struct ifnet *, u_long, caddr_t);
141static void nlge_start(struct ifnet *);
142static void nlge_rx(struct nlge_softc *sc, vm_paddr_t paddr, int len);
143
144static int nlge_mii_write(struct device *, int, int, int);
145static int nlge_mii_read(struct device *, int, int);
146static void nlge_mac_mii_statchg(device_t);
147static int nlge_mediachange(struct ifnet *ifp);
148static void nlge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
149
150/* Other internal/helper functions */
151static void *get_buf(void);
152static struct mbuf *get_mbuf(void);
153
154static void nlna_add_to_port_set(struct nlge_port_set *pset,
155 struct nlge_softc *sc);
156static void nlna_config_pde(struct nlna_softc *);
157static void nlna_config_parser(struct nlna_softc *);
158static void nlna_config_classifier(struct nlna_softc *);
159static void nlna_config_fifo_spill_area(struct nlna_softc *sc);
160static void nlna_config_common(struct nlna_softc *);
161static void nlna_disable_ports(struct nlna_softc *sc);
162static void nlna_enable_intr(struct nlna_softc *sc);
163static void nlna_disable_intr(struct nlna_softc *sc);
164static void nlna_enable_ports(struct nlna_softc *sc);
165static void nlna_get_all_softc(device_t iodi_dev,
166 struct nlna_softc **sc_vec, uint32_t vec_sz);
167static void nlna_hw_init(struct nlna_softc *sc);
168static int nlna_is_last_active_na(struct nlna_softc *sc);
169static void nlna_media_specific_config(struct nlna_softc *sc);
170static void nlna_reset_ports(struct nlna_softc *sc,
171 struct xlr_gmac_block_t *blk);
172static struct nlna_softc *nlna_sc_init(device_t dev,
173 struct xlr_gmac_block_t *blk);
174static __inline__ int nlna_send_free_desc(struct nlna_softc *nlna,
175 vm_paddr_t addr);
176static void nlna_setup_intr(struct nlna_softc *sc);
177static void nlna_smp_update_pde(void *dummy __unused);
178static void nlna_submit_rx_free_desc(struct nlna_softc *sc,
179 uint32_t n_desc);
180
181static int nlge_gmac_config_speed(struct nlge_softc *, int quick);
182static void nlge_hw_init(struct nlge_softc *sc);
183static int nlge_if_init(struct nlge_softc *sc);
184static void nlge_intr(void *arg);
185static int nlge_irq_init(struct nlge_softc *sc);
186static void nlge_irq_fini(struct nlge_softc *sc);
187static void nlge_media_specific_init(struct nlge_softc *sc);
188static void nlge_mii_init(device_t dev, struct nlge_softc *sc);
189static int nlge_mii_read_internal(xlr_reg_t *mii_base, int phyaddr,
190 int regidx);
191static void nlge_mii_write_internal(xlr_reg_t *mii_base, int phyaddr,
192 int regidx, int regval);
193void nlge_msgring_handler(int bucket, int size, int code,
194 int stid, struct msgrng_msg *msg, void *data);
195static void nlge_port_disable(int id, xlr_reg_t *base, int port_type);
196static void nlge_port_enable(struct nlge_softc *sc);
197static void nlge_read_mac_addr(struct nlge_softc *sc);
198static void nlge_sc_init(struct nlge_softc *sc, device_t dev,
199 struct xlr_gmac_port *port_info);
200static void nlge_set_mac_addr(struct nlge_softc *sc);
201static void nlge_set_port_attribs(struct nlge_softc *,
202 struct xlr_gmac_port *);
203static void nlge_sgmii_init(struct nlge_softc *sc);
204static void nlge_start_locked(struct ifnet *ifp, struct nlge_softc *sc);
205
206static int prepare_fmn_message(struct nlge_softc *sc,
207 struct msgrng_msg *msg, uint32_t *n_entries, struct mbuf *m_head,
208 uint64_t fr_stid, struct nlge_tx_desc **tx_desc);
209
210static void release_mbuf(uint64_t phy_addr);
211static void release_tx_desc(struct msgrng_msg *msg);
212static int send_fmn_msg_tx(struct nlge_softc *, struct msgrng_msg *,
213 uint32_t n_entries);
214
215//#define DEBUG
216#ifdef DEBUG
217static int mac_debug = 1;
218static int reg_dump = 0;
219#undef PDEBUG
220#define PDEBUG(fmt, args...) \
221 do {\
222 if (mac_debug) {\
223 printf("[%s@%d|%s]: cpu_%d: " fmt, \
224 __FILE__, __LINE__, __FUNCTION__, PCPU_GET(cpuid), ##args);\
225 }\
226 } while(0);
227
228/* Debug/dump functions */
229static void dump_reg(xlr_reg_t *addr, uint32_t offset, char *name);
230static void dump_gmac_registers(struct nlge_softc *);
231static void dump_na_registers(xlr_reg_t *base, int port_id);
232static void dump_mac_stats(struct nlge_softc *sc);
233static void dump_mii_regs(struct nlge_softc *sc) __attribute__((used));
234static void dump_mii_data(struct mii_data *mii) __attribute__((used));
235static void dump_board_info(struct xlr_board_info *);
236static void dump_pcs_regs(struct nlge_softc *sc, int phy);
237
238#else
239#undef PDEBUG
240#define PDEBUG(fmt, args...)
241#define dump_reg(a, o, n) /* nop */
242#define dump_gmac_registers(a) /* nop */
243#define dump_na_registers(a, p) /* nop */
244#define dump_board_info(b) /* nop */
245#define dump_mac_stats(sc) /* nop */
246#define dump_mii_regs(sc) /* nop */
247#define dump_mii_data(mii) /* nop */
248#define dump_pcs_regs(sc, phy) /* nop */
249#endif
250
251/* Wrappers etc. to export the driver entry points. */
252static device_method_t nlna_methods[] = {
253 /* Device interface */
254 DEVMETHOD(device_probe, nlna_probe),
255 DEVMETHOD(device_attach, nlna_attach),
256 DEVMETHOD(device_detach, nlna_detach),
257 DEVMETHOD(device_shutdown, nlna_shutdown),
258 DEVMETHOD(device_suspend, nlna_suspend),
259 DEVMETHOD(device_resume, nlna_resume),
260
 261	/* Bus interface: TBD - what are these for? */
262 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
263 DEVMETHOD(bus_print_child, bus_generic_print_child),
264 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
265
266 { 0, 0 }
267};
268
269static driver_t nlna_driver = {
270 "nlna",
271 nlna_methods,
272 sizeof(struct nlna_softc)
273};
274
275static devclass_t nlna_devclass;
276
277static device_method_t nlge_methods[] = {
278 /* Device interface */
279 DEVMETHOD(device_probe, nlge_probe),
280 DEVMETHOD(device_attach, nlge_attach),
281 DEVMETHOD(device_detach, nlge_detach),
282 DEVMETHOD(device_shutdown, bus_generic_shutdown),
283 DEVMETHOD(device_suspend, nlge_suspend),
284 DEVMETHOD(device_resume, nlge_resume),
285
286 /* MII interface */
287 DEVMETHOD(miibus_readreg, nlge_mii_read),
288 DEVMETHOD(miibus_writereg, nlge_mii_write),
289 DEVMETHOD(miibus_statchg, nlge_mac_mii_statchg),
290
291 {0, 0}
292};
293
294static driver_t nlge_driver = {
295 "nlge",
296 nlge_methods,
297 sizeof(struct nlge_softc)
298};
299
300static devclass_t nlge_devclass;
301
302DRIVER_MODULE(nlna, iodi, nlna_driver, nlna_devclass, 0, 0);
303DRIVER_MODULE(nlge, nlna, nlge_driver, nlge_devclass, 0, 0);
304DRIVER_MODULE(miibus, nlge, miibus_driver, miibus_devclass, 0, 0);
305
306static uma_zone_t nl_tx_desc_zone;
307
 308/* Atomically add the given value to an unsigned integer in memory. */
309static __inline__ unsigned int
310ldadd_wu(unsigned int value, unsigned long *addr)
311{
312 __asm__ __volatile__( ".set push\n"
313 ".set noreorder\n"
314 "move $8, %2\n"
315 "move $9, %3\n"
316 /* "ldaddwu $8, $9\n" */
317 ".word 0x71280011\n"
318 "move %0, $8\n"
319 ".set pop\n"
320 : "=&r"(value), "+m"(*addr)
321 : "0"(value), "r" ((unsigned long)addr)
322 : "$8", "$9");
323 return value;
324}
325
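/*
 * Set the KX bit in the status register (and mask interrupts) so that the
 * caller can issue loads from 64-bit physical addresses; the previous status
 * word is returned and the caller is expected to restore it with
 * mips_wr_status() (see nlge_rx()).
 */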
326static __inline__ uint32_t
327xlr_enable_kx(void)
328{
329 uint32_t sr = mips_rd_status();
330
331 mips_wr_status((sr & ~MIPS_SR_INT_IE) | MIPS_SR_KX);
332 return sr;
333}
334
335static int
336nlna_probe(device_t dev)
337{
338 return (BUS_PROBE_DEFAULT);
339}
340
341/*
342 * Add all attached GMAC/XGMAC ports to the device tree. Port
 343 * configuration is spread across two regions - common configuration
 344 * for all ports in the NA and per-port configuration in a MAC-specific
 345 * region. This function does the following:
 346 * - adds the ports to the device tree
 347 * - resets the ports
 348 * - does all the common initialization
 349 * - invokes bus_generic_attach for per-port configuration
 350 * - supplies the initial free rx descriptors to the ports
 351 * - initializes s/w data structures
 352 * - finally, enables interrupts (only in the last NA).
353 *
354 * For reference, sample address space for common and per-port
355 * registers is given below.
356 *
357 * The address map for RNA0 is: (typical value)
358 *
359 * XLR_IO_BASE +--------------------------------------+ 0xbef0_0000
360 * | |
361 * | |
362 * | |
363 * | |
364 * | |
365 * | |
366 * GMAC0 ---> +--------------------------------------+ 0xbef0_c000
367 * | |
368 * | |
369 * (common) -> |......................................| 0xbef0_c400
370 * | |
371 * | (RGMII/SGMII: common registers) |
372 * | |
373 * GMAC1 ---> |--------------------------------------| 0xbef0_d000
374 * | |
375 * | |
376 * (common) -> |......................................| 0xbef0_d400
377 * | |
378 * | (RGMII/SGMII: common registers) |
379 * | |
380 * |......................................|
381 * and so on ....
382 *
383 * Ref: Figure 14-3 and Table 14-1 of XLR PRM
384 */
385static int
386nlna_attach(device_t dev)
387{
388 struct xlr_gmac_block_t *block_info;
389 device_t gmac_dev;
390 struct nlna_softc *sc;
391 int error;
392 int i;
393 int id;
394
395 id = device_get_unit(dev);
396 block_info = device_get_ivars(dev);
397 if (!block_info->enabled) {
398 return 0;
399 }
400
401#ifdef DEBUG
402 dump_board_info(&xlr_board_info);
403#endif
404 block_info->baseaddr += DEFAULT_XLR_IO_BASE;
405
406 /* Initialize nlna state in softc structure */
407 sc = nlna_sc_init(dev, block_info);
408
409 /* Add device's for the ports controlled by this NA. */
410 if (block_info->type == XLR_GMAC) {
 411		KASSERT(id < 2, ("No GMACs supported with this network "
412 "accelerator: %d", id));
413 for (i = 0; i < sc->num_ports; i++) {
414 gmac_dev = device_add_child(dev, "nlge", -1);
415 device_set_ivars(gmac_dev, &block_info->gmac_port[i]);
416 }
417 } else if (block_info->type == XLR_XGMAC) {
 418		KASSERT(id > 0 && id <= 2, ("No XGMACs supported with this "
419 "network accelerator: %d", id));
420 gmac_dev = device_add_child(dev, "nlge", -1);
421 device_set_ivars(gmac_dev, &block_info->gmac_port[0]);
422 } else if (block_info->type == XLR_SPI4) {
423 /* SPI4 is not supported here */
 424		device_printf(dev, "Unsupported: NA with SPI4 type\n");
425 return (ENOTSUP);
426 }
427
428 nlna_reset_ports(sc, block_info);
429
 430	/* Initialize Network Accelerator registers. */
431 nlna_hw_init(sc);
432
433 error = bus_generic_attach(dev);
434 if (error) {
435 device_printf(dev, "failed to attach port(s)\n");
436 goto fail;
437 }
438
439 /* Send out the initial pool of free-descriptors for the rx path */
440 nlna_submit_rx_free_desc(sc, MAX_FRIN_SPILL);
441
442 /* S/w data structure initializations shared by all NA's. */
443 if (nl_tx_desc_zone == NULL) {
444 /* Create a zone for allocating tx descriptors */
445 nl_tx_desc_zone = uma_zcreate("NL Tx Desc",
446 sizeof(struct nlge_tx_desc), NULL, NULL, NULL, NULL,
447 XLR_CACHELINE_SIZE, 0);
448 }
449
450 /* Enable NA interrupts */
451 nlna_setup_intr(sc);
452
453 return (0);
454
455fail:
456 return (error);
457}
458
459static int
460nlna_detach(device_t dev)
461{
462 struct nlna_softc *sc;
463
464 sc = device_get_softc(dev);
465 if (device_is_alive(dev)) {
466 nlna_disable_intr(sc);
467 /* This will make sure that per-port detach is complete
468 * and all traffic on the ports has been stopped. */
469 bus_generic_detach(dev);
470 uma_zdestroy(nl_tx_desc_zone);
471 }
472
473 return (0);
474}
475
476static int
477nlna_suspend(device_t dev)
478{
479
480 return (0);
481}
482
483static int
484nlna_resume(device_t dev)
485{
486
487 return (0);
488}
489
490static int
491nlna_shutdown(device_t dev)
492{
493 return (0);
494}
495
496
497/* GMAC port entry points */
498static int
499nlge_probe(device_t dev)
500{
501 struct nlge_softc *sc;
502 struct xlr_gmac_port *port_info;
503 int index;
504 char *desc[] = { "RGMII", "SGMII", "RGMII/SGMII", "XGMAC", "XAUI",
505 "Unknown"};
506
507 port_info = device_get_ivars(dev);
508 index = (port_info->type < XLR_RGMII || port_info->type > XLR_XAUI) ?
509 5 : port_info->type;
510 device_set_desc_copy(dev, desc[index]);
511
512 sc = device_get_softc(dev);
513 nlge_sc_init(sc, dev, port_info);
514
515 nlge_port_disable(sc->id, sc->base, sc->port_type);
516
517 return (0);
518}
519
520static int
521nlge_attach(device_t dev)
522{
523 struct nlge_softc *sc;
524 struct nlna_softc *nsc;
525 int error;
526
527 sc = device_get_softc(dev);
528
529 nlge_if_init(sc);
530 nlge_mii_init(dev, sc);
531 error = nlge_irq_init(sc);
532 if (error)
533 return error;
534 nlge_hw_init(sc);
535
536 nsc = (struct nlna_softc *)device_get_softc(device_get_parent(dev));
537 nsc->child_sc[sc->instance] = sc;
538
539 return (0);
540}
541
542static int
543nlge_detach(device_t dev)
544{
545 struct nlge_softc *sc;
546 struct ifnet *ifp;
547
548 sc = device_get_softc(dev);
549 ifp = sc->nlge_if;
550
551 if (device_is_attached(dev)) {
552 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
553 nlge_port_disable(sc->id, sc->base, sc->port_type);
554 nlge_irq_fini(sc);
555 ether_ifdetach(ifp);
556 bus_generic_detach(dev);
557 }
558 if (ifp)
559 if_free(ifp);
560
561 return (0);
562}
563
564static int
565nlge_suspend(device_t dev)
566{
567 return (0);
568}
569
570static int
571nlge_resume(device_t dev)
572{
573 return (0);
574}
575
576static void
577nlge_init(void *addr)
578{
579 struct nlge_softc *sc;
580 struct ifnet *ifp;
581
582 sc = (struct nlge_softc *)addr;
583 ifp = sc->nlge_if;
584
585 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
586 return;
587
588 nlge_gmac_config_speed(sc, 0);
589 ifp->if_drv_flags |= IFF_DRV_RUNNING;
590 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
591 nlge_port_enable(sc);
592
593 if (sc->port_type == XLR_SGMII) {
594 dump_pcs_regs(sc, 27);
595 }
596 dump_gmac_registers(sc);
597 dump_mac_stats(sc);
598}
599
600static int
601nlge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
602{
603 struct mii_data *mii;
604 struct nlge_softc *sc;
605 struct ifreq *ifr;
606 int error;
607
608 sc = ifp->if_softc;
609 error = 0;
610 ifr = (struct ifreq *)data;
611 switch(command) {
612 case SIOCSIFFLAGS:
613 break;
614 case SIOCSIFMEDIA:
615 case SIOCGIFMEDIA:
616 if (sc->mii_bus != NULL) {
617 mii = (struct mii_data *)device_get_softc(sc->mii_bus);
618 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
619 command);
620 }
621 break;
622 case SIOCSIFADDR:
623 // intentional fall thru
624 case SIOCSIFMTU:
625 default:
626 error = ether_ioctl(ifp, command, data);
627 break;
628 }
629
630 return (error);
631}
632
633/* This function is called from an interrupt handler */
634void
635nlge_msgring_handler(int bucket, int size, int code, int stid,
636 struct msgrng_msg *msg, void *data)
637{
638 struct nlna_softc *na_sc;
639 struct nlge_softc *sc;
640 struct ifnet *ifp;
641 uint64_t phys_addr;
642 unsigned long addr;
643 uint32_t length;
644 int ctrl;
645 int cpu;
646 int tx_error;
647 int port;
648 int vcpu;
649 int is_p2p;
650
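	/*
	 * Decode msg0 as done below: a zero length field marks a free-back
	 * message for a transmitted packet (port, p2p flag and tx-error bits
	 * live in the upper word), while a non-zero length marks a received
	 * frame whose buffer physical address is in the low 40 bits.
	 */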
651 cpu = xlr_core_id();
652 vcpu = (cpu << 2) + xlr_thr_id();
653
654 addr = 0;
655 is_p2p = 0;
656 tx_error = 0;
657 length = (msg->msg0 >> 40) & 0x3fff;
658 na_sc = (struct nlna_softc *)data;
659 phys_addr = (uint64_t) (msg->msg0 & 0xffffffffe0ULL);
660 if (length == 0) {
661 ctrl = CTRL_REG_FREE;
662 port = (msg->msg0 >> 54) & 0x0f;
663 is_p2p = (msg->msg0 >> 62) & 0x1;
664 tx_error = (msg->msg0 >> 58) & 0xf;
665 } else {
666 ctrl = CTRL_SNGL;
667 length = length - BYTE_OFFSET - MAC_CRC_LEN;
668 port = msg->msg0 & 0x0f;
669 }
670
671 sc = na_sc->child_sc[port];
672 if (sc == NULL) {
673 printf("Message (of %d len) with softc=NULL on %d port (type=%s)\n",
674 length, port, (ctrl == CTRL_SNGL ? "Pkt rx" :
675 "Freeback for tx packet"));
676 return;
677 }
678
679 if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
680 if (is_p2p)
681 release_tx_desc(msg);
682 else {
683 release_mbuf(msg->msg0 & 0xffffffffffULL);
684 }
685 ifp = sc->nlge_if;
686 if (ifp->if_drv_flags & IFF_DRV_OACTIVE){
687 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
688 }
689 ldadd_wu(1, (tx_error) ? &ifp->if_oerrors: &ifp->if_opackets);
690 } else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
691 /* Rx Packet */
692
693 nlge_rx(sc, phys_addr, length);
694 nlna_submit_rx_free_desc(na_sc, 1); /* return free descr to NA */
695 } else {
696 printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
697 }
698
699}
700
701static void
702nlge_start(struct ifnet *ifp)
703{
704 struct nlge_softc *sc;
705
706 sc = ifp->if_softc;
707 //NLGE_LOCK(sc);
708 nlge_start_locked(ifp, sc);
709 //NLGE_UNLOCK(sc);
710}
711
712static void
713nlge_start_locked(struct ifnet *ifp, struct nlge_softc *sc)
714{
715 struct msgrng_msg msg;
716 struct mbuf *m;
717 struct nlge_tx_desc *tx_desc;
718 uint64_t fr_stid;
719 uint32_t cpu;
720 uint32_t n_entries;
721 uint32_t tid;
722 int ret;
723 int sent;
724
725 cpu = xlr_core_id();
726 tid = xlr_thr_id();
727 fr_stid = cpu * 8 + tid + 4;
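	/*
	 * Free-back messages for transmitted buffers are requested on bucket
	 * (4 + thread id) within this core's group of eight message-ring
	 * buckets.
	 */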
728
729 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
730 return;
731 }
732
733 do {
734 /* Grab a packet off the queue. */
735 IF_DEQUEUE(&ifp->if_snd, m);
736 if (m == NULL) {
737 return;
738 }
739
740 tx_desc = NULL;
741 ret = prepare_fmn_message(sc, &msg, &n_entries, m, fr_stid, &tx_desc);
742 if (ret) {
743 goto fail;
744 }
745 sent = send_fmn_msg_tx(sc, &msg, n_entries);
746 if (!sent) {
747 goto fail;
748 }
749 } while(1);
750
751 return;
752
753fail:
754 if (tx_desc != NULL) {
755 uma_zfree(nl_tx_desc_zone, tx_desc);
756 }
757 if (m != NULL) {
 758		/*
 759		 * TBD: It is observed that traffic continues to completion only
 760		 * when both of the statements below are left disabled.
 761		 * Otherwise, the port locks up in the middle and never
 762		 * recovers from it. The current theory for this behavior
 763		 * is that the queue is full and the upper layer is neither
 764		 * able to add to it nor invoke nlge_start to drain the
 765		 * queue. The driver may have to do something in addition
 766		 * to resetting the OACTIVE bit when a transmit free-back
 767		 * is received.
 768		 */
769 //ifp->if_drv_flags |= IFF_DRV_OACTIVE;
770 //IF_PREPEND(&ifp->if_snd, m);
771 m_freem(m);
772 ldadd_wu(1, &ifp->if_iqdrops);
773 }
774 return;
775}
776
777static void
778nlge_rx(struct nlge_softc *sc, vm_paddr_t paddr, int len)
779{
780 struct ifnet *ifp;
781 struct mbuf *m;
782 uint32_t tm, mag, sr;
783
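	/*
	 * The mbuf pointer and a magic word (0xf00bad) are assumed to have
	 * been stashed in the cache line just before the packet buffer when
	 * the free descriptor was allocated; read them back to recover the
	 * mbuf for this frame.
	 */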
784 sr = xlr_enable_kx();
785 tm = xlr_paddr_lw(paddr - XLR_CACHELINE_SIZE);
786 mag = xlr_paddr_lw(paddr - XLR_CACHELINE_SIZE + sizeof(uint32_t));
787 mips_wr_status(sr);
788
789 m = (struct mbuf *)(intptr_t)tm;
790 if (mag != 0xf00bad) {
 791		/* somebody else's packet. Error - FIXME in initialization */
792 printf("cpu %d: *ERROR* Not my packet paddr %llx\n",
793 xlr_core_id(), (uint64_t) paddr);
794 return;
795 }
796
797 ifp = sc->nlge_if;
798 /* align the data */
799 m->m_data += BYTE_OFFSET;
800 m->m_pkthdr.len = m->m_len = len;
801 m->m_pkthdr.rcvif = ifp;
802
803 ldadd_wu(1, &ifp->if_ipackets);
804 (*ifp->if_input)(ifp, m);
805}
806
807static int
808nlge_mii_write(struct device *dev, int phyaddr, int regidx, int regval)
809{
810 struct nlge_softc *sc;
811
812 sc = device_get_softc(dev);
813 if (sc->phy_addr == phyaddr && sc->port_type != XLR_XGMII)
814 nlge_mii_write_internal(sc->mii_base, phyaddr, regidx, regval);
815
816 return (0);
817}
818
819static int
820nlge_mii_read(struct device *dev, int phyaddr, int regidx)
821{
822 struct nlge_softc *sc;
823 int val;
824
825 sc = device_get_softc(dev);
826 val = (sc->phy_addr != phyaddr && sc->port_type != XLR_XGMII) ? (0xffff) :
827 nlge_mii_read_internal(sc->mii_base, phyaddr, regidx);
828
829 return (val);
830}
831
832static void
833nlge_mac_mii_statchg(device_t dev)
834{
835}
836
837static int
838nlge_mediachange(struct ifnet *ifp)
839{
840 return 0;
841}
842
843static void
844nlge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
845{
846 struct nlge_softc *sc;
847 struct mii_data *md;
848
849 md = NULL;
850 sc = ifp->if_softc;
851 if (sc->mii_bus)
852 md = device_get_softc(sc->mii_bus);
853
854 ifmr->ifm_status = IFM_AVALID;
855 ifmr->ifm_active = IFM_ETHER;
856
857 if (sc->link == xlr_mac_link_down)
858 return;
859
860 if (md != NULL)
861 ifmr->ifm_active = md->mii_media.ifm_cur->ifm_media;
862 ifmr->ifm_status |= IFM_ACTIVE;
863}
864
865static struct nlna_softc *
866nlna_sc_init(device_t dev, struct xlr_gmac_block_t *blk)
867{
868 struct nlna_softc *sc;
869
870 sc = device_get_softc(dev);
871 memset(sc, 0, sizeof(*sc));
872 sc->nlna_dev = dev;
873 sc->base = (xlr_reg_t *) blk->baseaddr;
874 sc->rfrbucket = blk->station_rfr;
875 sc->station_id = blk->station_id;
876 sc->na_type = blk->type;
877 sc->mac_type = blk->mode;
878 sc->num_ports = blk->num_ports;
879
880 sc->mdio_set.port_vec = sc->mdio_sc;
881 sc->mdio_set.vec_sz = XLR_MAX_MACS;
882
883 return (sc);
884}
885
886/*
887 * Do:
888 * - Initialize common GMAC registers (index range 0x100-0x3ff).
889 */
890static void
891nlna_hw_init(struct nlna_softc *sc)
892{
893
894 /*
895 * It is seen that this is a critical function in bringing up FreeBSD.
896 * When it is not invoked, FreeBSD panics and fails during the
 897	 * multi-processor init (SI_SUB_SMP of mi_startup). The key function
898 * in this sequence seems to be platform_prep_smp_launch. */
899 if (register_msgring_handler(sc->station_id, nlge_msgring_handler, sc)) {
900 panic("Couldn't register msgring handler\n");
901 }
902 nlna_config_fifo_spill_area(sc);
903 nlna_config_pde(sc);
904 nlna_config_common(sc);
905 nlna_config_parser(sc);
906 nlna_config_classifier(sc);
907}
908
909/*
910 * Enable interrupts on all the ports controlled by this NA. For now, we
911 * only care about the MII interrupt and this has to be enabled only
 912 * on port 0.
913 *
914 * This function is not in-sync with the regular way of doing things - it
915 * executes only in the context of the last active network accelerator (and
916 * thereby has some ugly accesses in the device tree). Though inelegant, it
917 * is necessary to do it this way as the per-port interrupts can be
 918 * set up/enabled only after all the network accelerators have been
919 * initialized.
920 */
921static void
922nlna_setup_intr(struct nlna_softc *sc)
923{
924 struct nlna_softc *na_sc[XLR_MAX_NLNA];
925 struct nlge_port_set *pset;
926 struct xlr_gmac_port *port_info;
927 device_t iodi_dev;
928 int i, j;
929
930 if (!nlna_is_last_active_na(sc))
931 return ;
932
933 /* Collect all nlna softc pointers */
934 memset(na_sc, 0, sizeof(*na_sc) * XLR_MAX_NLNA);
935 iodi_dev = device_get_parent(sc->nlna_dev);
936 nlna_get_all_softc(iodi_dev, na_sc, XLR_MAX_NLNA);
937
938 /* Setup the MDIO interrupt lists. */
939 /*
940 * MDIO interrupts are coarse - a single interrupt line provides
941 * information about one of many possible ports. To figure out the
942 * exact port on which action is to be taken, all of the ports
943 * linked to an MDIO interrupt should be read. To enable this,
944 * ports need to add themselves to port sets.
945 */
946 for (i = 0; i < XLR_MAX_NLNA; i++) {
947 if (na_sc[i] == NULL)
948 continue;
949 for (j = 0; j < na_sc[i]->num_ports; j++) {
950 /* processing j-th port on i-th NA */
951 port_info = device_get_ivars(
952 na_sc[i]->child_sc[j]->nlge_dev);
953 pset = &na_sc[port_info->mdint_id]->mdio_set;
954 nlna_add_to_port_set(pset, na_sc[i]->child_sc[j]);
955 }
956 }
957
958 /* Enable interrupts */
959 for (i = 0; i < XLR_MAX_NLNA; i++) {
960 if (na_sc[i] != NULL && na_sc[i]->na_type != XLR_XGMAC) {
961 nlna_enable_intr(na_sc[i]);
962 }
963 }
964}
965
966static void
967nlna_add_to_port_set(struct nlge_port_set *pset, struct nlge_softc *sc)
968{
969 int i;
970
971 /* step past the non-NULL elements */
972 for (i = 0; i < pset->vec_sz && pset->port_vec[i] != NULL; i++) ;
973 if (i < pset->vec_sz)
974 pset->port_vec[i] = sc;
975 else
 976		printf("warning: internal error: out-of-bounds for MDIO array\n");
977}
978
979static void
980nlna_enable_intr(struct nlna_softc *sc)
981{
982 int i;
983
984 for (i = 0; i < sc->num_ports; i++) {
985 if (sc->child_sc[i]->instance == 0)
986 NLGE_WRITE(sc->child_sc[i]->base, R_INTMASK,
987 (1 << O_INTMASK__MDInt));
988 }
989}
990
991static void
992nlna_disable_intr(struct nlna_softc *sc)
993{
994 int i;
995
996 for (i = 0; i < sc->num_ports; i++) {
997 if (sc->child_sc[i]->instance == 0)
998 NLGE_WRITE(sc->child_sc[i]->base, R_INTMASK, 0);
999 }
1000}
1001
1002static int
1003nlna_is_last_active_na(struct nlna_softc *sc)
1004{
1005 int id;
1006
1007 id = device_get_unit(sc->nlna_dev);
1008 return (id == 2 || xlr_board_info.gmac_block[id + 1].enabled == 0);
1009}
1010
1011static __inline__ int
1012nlna_send_free_desc(struct nlna_softc *sc, vm_paddr_t addr)
1013{
1014 struct msgrng_msg msg;
1015 int stid;
1016 int code;
1017 int i;
1018
1019 stid = sc->rfrbucket;
1020 memset(&msg, 0, sizeof(msg));
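	/* The free descriptor carries only the 40-bit, 32-byte-aligned
	 * physical address of the buffer. */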
1021 msg.msg0 = (uint64_t) addr & 0xffffffffe0ULL;
1022
1023 code = (sc->na_type == XLR_XGMAC) ? MSGRNG_CODE_XGMAC : MSGRNG_CODE_MAC;
1024 for (i = 0; i < MAX_MSG_SND_ATTEMPTS; i++) {
1025 if (message_send(1, code, stid, &msg) == 0)
1026 return (0);
1027 }
1028 printf("Error: failed to send free desc to station %d\n", stid);
1029 return (1);
1030}
1031
1032static void
1033nlna_submit_rx_free_desc(struct nlna_softc *sc, uint32_t n_desc)
1034{
1035 void *ptr;
1036 unsigned long msgrng_flags;
1037 int i;
1038 int ret;
1039
1040 if (n_desc > 1) {
1041 PDEBUG("Sending %d free-in descriptors to station=%d\n", n_desc,
1042 sc->rfrbucket);
1043 }
1044
1045 for (i = 0; i < n_desc; i++) {
1046 ptr = get_buf();
1047 if (!ptr) {
1048 ret = -ENOMEM;
1049 device_printf(sc->nlna_dev, "Cannot allocate mbuf\n");
1050 break;
1051 }
1052
1053 /* Send the free Rx desc to the MAC */
1054 msgrng_access_enable(msgrng_flags);
1055 ret = nlna_send_free_desc(sc, vtophys(ptr));
1056 msgrng_access_disable(msgrng_flags);
1057 if (ret) /* no point trying other descriptors after
1058 a failure. */
1059 break;
1060 }
1061}
1062
1063static __inline__ void *
1064nlna_config_spill(xlr_reg_t *base, int reg_start_0, int reg_start_1,
1065 int reg_size, int size)
1066{
1067 void *spill;
1068 uint64_t phys_addr;
1069 uint32_t spill_size;
1070
1071 spill_size = size;
1072 spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
1073 M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
1074 if (spill == NULL || ((vm_offset_t) spill & (XLR_CACHELINE_SIZE - 1))) {
1075 panic("Unable to allocate memory for spill area!\n");
1076 }
1077 phys_addr = vtophys(spill);
1078 PDEBUG("Allocated spill %d bytes at %llx\n", size, phys_addr);
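	/* Program the spill area's physical address in 32-byte units, split
	 * across two registers (low 32 bits, then the remaining high bits),
	 * followed by its size. */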
1079 NLGE_WRITE(base, reg_start_0, (phys_addr >> 5) & 0xffffffff);
1080 NLGE_WRITE(base, reg_start_1, (phys_addr >> 37) & 0x07);
1081 NLGE_WRITE(base, reg_size, spill_size);
1082
1083 return (spill);
1084}
1085
1086/*
 1087 * Configure the six FIFOs that the network accelerator uses to
 1088 * communicate with the rest of the XLR/XLS device. Four of the FIFOs carry
 1089 * packets from the NA to the CPUs (the Class FIFOs) and two are for
 1090 * feeding the NA with free descriptors.
1091 */
1092static void
1093nlna_config_fifo_spill_area(struct nlna_softc *sc)
1094{
1095 sc->frin_spill = nlna_config_spill(sc->base,
1096 R_REG_FRIN_SPILL_MEM_START_0,
1097 R_REG_FRIN_SPILL_MEM_START_1,
1098 R_REG_FRIN_SPILL_MEM_SIZE,
1099 MAX_FRIN_SPILL *
1100 sizeof(struct fr_desc));
1101 sc->frout_spill = nlna_config_spill(sc->base,
1102 R_FROUT_SPILL_MEM_START_0,
1103 R_FROUT_SPILL_MEM_START_1,
1104 R_FROUT_SPILL_MEM_SIZE,
1105 MAX_FROUT_SPILL *
1106 sizeof(struct fr_desc));
1107 sc->class_0_spill = nlna_config_spill(sc->base,
1108 R_CLASS0_SPILL_MEM_START_0,
1109 R_CLASS0_SPILL_MEM_START_1,
1110 R_CLASS0_SPILL_MEM_SIZE,
1111 MAX_CLASS_0_SPILL *
1112 sizeof(union rx_tx_desc));
1113 sc->class_1_spill = nlna_config_spill(sc->base,
1114 R_CLASS1_SPILL_MEM_START_0,
1115 R_CLASS1_SPILL_MEM_START_1,
1116 R_CLASS1_SPILL_MEM_SIZE,
1117 MAX_CLASS_1_SPILL *
1118 sizeof(union rx_tx_desc));
1119 sc->class_2_spill = nlna_config_spill(sc->base,
1120 R_CLASS2_SPILL_MEM_START_0,
1121 R_CLASS2_SPILL_MEM_START_1,
1122 R_CLASS2_SPILL_MEM_SIZE,
1123 MAX_CLASS_2_SPILL *
1124 sizeof(union rx_tx_desc));
1125 sc->class_3_spill = nlna_config_spill(sc->base,
1126 R_CLASS3_SPILL_MEM_START_0,
1127 R_CLASS3_SPILL_MEM_START_1,
1128 R_CLASS3_SPILL_MEM_SIZE,
1129 MAX_CLASS_3_SPILL *
1130 sizeof(union rx_tx_desc));
1131}
1132
1133/* Set the CPU buckets that receive packets from the NA class FIFOs. */
1134static void
1135nlna_config_pde(struct nlna_softc *sc)
1136{
1137 uint64_t bucket_map;
1138 uint32_t cpumask;
1139 int i, cpu, bucket;
1140
1141 cpumask = 0x1;
1142#ifdef SMP
1143 /*
 1144	 * This function may be called before SMP start in a BOOTP/NFSROOT
 1145	 * setup. We will distribute packets to other cpus only after
 1146	 * SMP has started.
1147 */
1148 if (smp_started)
1149 cpumask = xlr_hw_thread_mask;
1150#endif
1151
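	/*
	 * Each core owns eight message-ring buckets; direct the class FIFOs
	 * at bucket 0 of every core that has a thread set in cpumask
	 * (bucket index = core id * 8).
	 */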
1152 bucket_map = 0;
1153 for (i = 0; i < 32; i++) {
1154 if (cpumask & (1 << i)) {
1155 cpu = i;
1156 bucket = ((cpu >> 2) << 3);
1157 bucket_map |= (1ULL << bucket);
1158 }
1159 }
1160 NLGE_WRITE(sc->base, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
1161 NLGE_WRITE(sc->base, R_PDE_CLASS_0 + 1, ((bucket_map >> 32) & 0xffffffff));
1162
1163 NLGE_WRITE(sc->base, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
1164 NLGE_WRITE(sc->base, R_PDE_CLASS_1 + 1, ((bucket_map >> 32) & 0xffffffff));
1165
1166 NLGE_WRITE(sc->base, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
1167 NLGE_WRITE(sc->base, R_PDE_CLASS_2 + 1, ((bucket_map >> 32) & 0xffffffff));
1168
1169 NLGE_WRITE(sc->base, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
1170 NLGE_WRITE(sc->base, R_PDE_CLASS_3 + 1, ((bucket_map >> 32) & 0xffffffff));
1171}
1172
1173static void
1174nlna_smp_update_pde(void *dummy __unused)
1175{
1176 device_t iodi_dev;
1177 struct nlna_softc *na_sc[XLR_MAX_NLNA];
1178 int i;
1179
1180 printf("Updating packet distribution for SMP\n");
1181
1182 iodi_dev = devclass_get_device(devclass_find("iodi"), 0);
1183 nlna_get_all_softc(iodi_dev, na_sc, XLR_MAX_NLNA);
1184
1185 for (i = 0; i < XLR_MAX_NLNA; i++) {
1186 if (na_sc[i] == NULL)
1187 continue;
1188 nlna_disable_ports(na_sc[i]);
1189 nlna_config_pde(na_sc[i]);
1190 nlna_enable_ports(na_sc[i]);
1191 }
1192}
1193
1194SYSINIT(nlna_smp_update_pde, SI_SUB_SMP, SI_ORDER_ANY, nlna_smp_update_pde,
1195 NULL);
1196
1197static void
1198nlna_config_parser(struct nlna_softc *sc)
1199{
1200 /*
 1201	 * Mark it as no classification. The parser extract is guaranteed to
 1202	 * be zero with no classification.
1203 */
1204 NLGE_WRITE(sc->base, R_L2TYPE_0, 0x00);
1205 NLGE_WRITE(sc->base, R_L2TYPE_0, 0x01);
1206
1207 /* configure the parser : L2 Type is configured in the bootloader */
1208 /* extract IP: src, dest protocol */
1209 NLGE_WRITE(sc->base, R_L3CTABLE,
1210 (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
1211 (0x0800 << 0));
1212 NLGE_WRITE(sc->base, R_L3CTABLE + 1,
1213 (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));
1214}
1215
1216static void
1217nlna_config_classifier(struct nlna_softc *sc)
1218{
1219 int i;
1220
1221 if (sc->mac_type == XLR_XGMII) { /* TBD: XGMII init sequence */
1222 /* xgmac translation table doesn't have sane values on reset */
1223 for (i = 0; i < 64; i++)
1224 NLGE_WRITE(sc->base, R_TRANSLATETABLE + i, 0x0);
1225
1226 /*
1227 * use upper 7 bits of the parser extract to index the
1228 * translate table
1229 */
1230 NLGE_WRITE(sc->base, R_PARSERCONFIGREG, 0x0);
1231 }
1232}
1233
1234/*
 1235 * Complete the h/w register initializations that are common to all the
 1236 * ports controlled by an NA.
1237 */
1238static void
1239nlna_config_common(struct nlna_softc *sc)
1240{
1241 struct xlr_gmac_block_t *block_info;
1242 struct stn_cc *gmac_cc_config;
1243 int i, id;
1244
1245 block_info = device_get_ivars(sc->nlna_dev);
1246
1247 id = device_get_unit(sc->nlna_dev);
1248 gmac_cc_config = block_info->credit_config;
1249 for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1250 NLGE_WRITE(sc->base, R_CC_CPU0_0 + i,
1251 gmac_cc_config->counters[i >> 3][i & 0x07]);
1252 }
1253
1254 NLGE_WRITE(sc->base, R_MSG_TX_THRESHOLD, 3);
1255
1256 NLGE_WRITE(sc->base, R_DMACR0, 0xffffffff);
1257 NLGE_WRITE(sc->base, R_DMACR1, 0xffffffff);
1258 NLGE_WRITE(sc->base, R_DMACR2, 0xffffffff);
1259 NLGE_WRITE(sc->base, R_DMACR3, 0xffffffff);
1260 NLGE_WRITE(sc->base, R_FREEQCARVE, 0);
1261
1262 nlna_media_specific_config(sc);
1263}
1264
1265static void
1266nlna_media_specific_config(struct nlna_softc *sc)
1267{
1268 struct bucket_size *bucket_sizes;
1269
1270 bucket_sizes = xlr_board_info.bucket_sizes;
1271 switch (sc->mac_type) {
1272 case XLR_RGMII:
1273 case XLR_SGMII:
1274 case XLR_XAUI:
1275 NLGE_WRITE(sc->base, R_GMAC_JFR0_BUCKET_SIZE,
1276 bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
1277 NLGE_WRITE(sc->base, R_GMAC_RFR0_BUCKET_SIZE,
1278 bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
1279 NLGE_WRITE(sc->base, R_GMAC_JFR1_BUCKET_SIZE,
1280 bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
1281 NLGE_WRITE(sc->base, R_GMAC_RFR1_BUCKET_SIZE,
1282 bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);
1283
1284 if (sc->mac_type == XLR_XAUI) {
1285 NLGE_WRITE(sc->base, R_TXDATAFIFO0, (224 << 16));
1286 }
1287 break;
1288
1289 case XLR_XGMII:
1290 NLGE_WRITE(sc->base, R_XGS_RFR_BUCKET_SIZE,
1291 bucket_sizes->bucket[sc->rfrbucket]);
1292
1293 default:
1294 break;
1295 }
1296}
1297
1298static void
1299nlna_reset_ports(struct nlna_softc *sc, struct xlr_gmac_block_t *blk)
1300{
1301 xlr_reg_t *addr;
1302 int i;
1303 uint32_t rx_ctrl;
1304
 1305	/* Refer to Section 13.9.3 of the PRM for the reset sequence. */
1306
1307 for (i = 0; i < sc->num_ports; i++) {
1308 uint32_t base = (uint32_t)DEFAULT_XLR_IO_BASE;
1309
1310 base += blk->gmac_port[i].base_addr;
1311 addr = (xlr_reg_t *) base;
1312
1313 /* 1. Reset RxEnable in MAC_CONFIG */
1314 switch (sc->mac_type) {
1315 case XLR_RGMII:
1316 case XLR_SGMII:
1317 NLGE_UPDATE(addr, R_MAC_CONFIG_1, 0,
1318 (1 << O_MAC_CONFIG_1__rxen));
1319 break;
1320 case XLR_XAUI:
1321 case XLR_XGMII:
1322 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1323 (1 << O_RX_CONTROL__RxEnable));
1324 break;
1325 default:
1326 printf("Error: Unsupported port_type=%d\n",
1327 sc->mac_type);
1328 }
1329
1330 /* 1.1 Wait for RxControl.RxHalt to be set */
1331 do {
1332 rx_ctrl = NLGE_READ(addr, R_RX_CONTROL);
1333 } while (!(rx_ctrl & 0x2));
1334
1335 /* 2. Set the soft reset bit in RxControl */
1336 NLGE_UPDATE(addr, R_RX_CONTROL, (1 << O_RX_CONTROL__SoftReset),
1337 (1 << O_RX_CONTROL__SoftReset));
1338
1339 /* 2.1 Wait for RxControl.SoftResetDone to be set */
1340 do {
1341 rx_ctrl = NLGE_READ(addr, R_RX_CONTROL);
1342 } while (!(rx_ctrl & 0x8));
1343
1344 /* 3. Clear the soft reset bit in RxControl */
1345 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1346 (1 << O_RX_CONTROL__SoftReset));
1347
1348 /* Turn off tx/rx on the port. */
1349 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1350 (1 << O_RX_CONTROL__RxEnable));
1351 NLGE_UPDATE(addr, R_TX_CONTROL, 0,
1352 (1 << O_TX_CONTROL__TxEnable));
1353 }
1354}
1355
1356static void
1357nlna_disable_ports(struct nlna_softc *sc)
1358{
1359 struct xlr_gmac_block_t *blk;
1360 xlr_reg_t *addr;
1361 int i;
1362
1363 blk = device_get_ivars(sc->nlna_dev);
1364 for (i = 0; i < sc->num_ports; i++) {
1365 uint32_t base = (uint32_t)DEFAULT_XLR_IO_BASE;
1366
1367 base += blk->gmac_port[i].base_addr;
1368 addr = (xlr_reg_t *) base;
1369 nlge_port_disable(i, addr, blk->gmac_port[i].type);
1370 }
1371}
1372
1373static void
1374nlna_enable_ports(struct nlna_softc *sc)
1375{
1376 device_t nlge_dev, *devlist;
1377 struct nlge_softc *port_sc;
1378 int i, numdevs;
1379
1380 device_get_children(sc->nlna_dev, &devlist, &numdevs);
1381 for (i = 0; i < numdevs; i++) {
1382 nlge_dev = devlist[i];
1383 if (nlge_dev == NULL)
1384 continue;
1385 port_sc = device_get_softc(nlge_dev);
1386 if (port_sc->nlge_if->if_drv_flags & IFF_DRV_RUNNING)
1387 nlge_port_enable(port_sc);
1388 }
1389 free(devlist, M_TEMP);
1390}
1391
1392static void
1393nlna_get_all_softc(device_t iodi_dev, struct nlna_softc **sc_vec,
1394 uint32_t vec_sz)
1395{
1396 device_t na_dev;
1397 int i;
1398
1399 for (i = 0; i < vec_sz; i++) {
1400 sc_vec[i] = NULL;
1401 na_dev = device_find_child(iodi_dev, "nlna", i);
1402 if (na_dev != NULL)
1403 sc_vec[i] = device_get_softc(na_dev);
1404 }
1405}
1406
1407static void
1408nlge_port_disable(int id, xlr_reg_t *base, int port_type)
1409{
1410 uint32_t rd;
1411
1412 NLGE_UPDATE(base, R_RX_CONTROL, 0x0, 1 << O_RX_CONTROL__RxEnable);
1413 do {
1414 rd = NLGE_READ(base, R_RX_CONTROL);
1415 } while (!(rd & (1 << O_RX_CONTROL__RxHalt)));
1416
1417 NLGE_UPDATE(base, R_TX_CONTROL, 0, 1 << O_TX_CONTROL__TxEnable);
1418 do {
1419 rd = NLGE_READ(base, R_TX_CONTROL);
1420 } while (!(rd & (1 << O_TX_CONTROL__TxIdle)));
1421
1422 switch (port_type) {
1423 case XLR_RGMII:
1424 case XLR_SGMII:
1425 NLGE_UPDATE(base, R_MAC_CONFIG_1, 0,
1426 ((1 << O_MAC_CONFIG_1__rxen) |
1427 (1 << O_MAC_CONFIG_1__txen)));
1428 break;
1429 case XLR_XGMII:
1430 case XLR_XAUI:
1431 NLGE_UPDATE(base, R_XGMAC_CONFIG_1, 0,
1432 ((1 << O_XGMAC_CONFIG_1__hsttfen) |
1433 (1 << O_XGMAC_CONFIG_1__hstrfen)));
1434 break;
1435 default:
1436 panic("Unknown MAC type on port %d\n", id);
1437 }
1438}
1439
1440static void
1441nlge_port_enable(struct nlge_softc *sc)
1442{
1443 struct xlr_gmac_port *self;
1444 xlr_reg_t *base;
1445
1446 base = sc->base;
1447 self = device_get_ivars(sc->nlge_dev);
1448 if (xlr_board_info.is_xls && sc->port_type == XLR_RGMII)
1449 NLGE_UPDATE(base, R_RX_CONTROL, (1 << O_RX_CONTROL__RGMII),
1450 (1 << O_RX_CONTROL__RGMII));
1451
1452 NLGE_UPDATE(base, R_RX_CONTROL, (1 << O_RX_CONTROL__RxEnable),
1453 (1 << O_RX_CONTROL__RxEnable));
1454 NLGE_UPDATE(base, R_TX_CONTROL,
1455 (1 << O_TX_CONTROL__TxEnable | RGE_TX_THRESHOLD_BYTES),
1456 (1 << O_TX_CONTROL__TxEnable | 0x3fff));
1457 switch (sc->port_type) {
1458 case XLR_RGMII:
1459 case XLR_SGMII:
1460 NLGE_UPDATE(base, R_MAC_CONFIG_1,
1461 ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen)),
1462 ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen)));
1463 break;
1464 case XLR_XGMII:
1465 case XLR_XAUI:
1466 NLGE_UPDATE(base, R_XGMAC_CONFIG_1,
1467 ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen)),
1468 ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen)));
1469 break;
1470 default:
1471 panic("Unknown MAC type on port %d\n", sc->id);
1472 }
1473}
1474
1475static void
1476nlge_sgmii_init(struct nlge_softc *sc)
1477{
1478 xlr_reg_t *mmio_gpio;
1479 int i;
1480 int phy;
1481
1482 if (sc->port_type != XLR_SGMII)
1483 return;
1484
1485 nlge_mii_write_internal(sc->serdes_addr, 26, 0, 0x6DB0);
1486 nlge_mii_write_internal(sc->serdes_addr, 26, 1, 0xFFFF);
1487 nlge_mii_write_internal(sc->serdes_addr, 26, 2, 0xB6D0);
1488 nlge_mii_write_internal(sc->serdes_addr, 26, 3, 0x00FF);
1489 nlge_mii_write_internal(sc->serdes_addr, 26, 4, 0x0000);
1490 nlge_mii_write_internal(sc->serdes_addr, 26, 5, 0x0000);
1491 nlge_mii_write_internal(sc->serdes_addr, 26, 6, 0x0005);
1492 nlge_mii_write_internal(sc->serdes_addr, 26, 7, 0x0001);
1493 nlge_mii_write_internal(sc->serdes_addr, 26, 8, 0x0000);
1494 nlge_mii_write_internal(sc->serdes_addr, 26, 9, 0x0000);
1495 nlge_mii_write_internal(sc->serdes_addr, 26,10, 0x0000);
1496
1497 for(i=0;i<10000000;i++){} /* delay */
1498 /* program GPIO values for serdes init parameters */
1499 mmio_gpio = (xlr_reg_t *) (DEFAULT_XLR_IO_BASE + XLR_IO_GPIO_OFFSET);
1500 mmio_gpio[0x20] = 0x7e6802;
1501 mmio_gpio[0x10] = 0x7104;
1502 for(i=0;i<100000000;i++){}
1503
1504 /* enable autoneg - more magic */
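	/* The SGMII PCS for this port is assumed to respond at MII address
	 * 27 + (phy_addr % 4). */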
1505 phy = sc->phy_addr % 4 + 27;
1506 nlge_mii_write_internal(sc->pcs_addr, phy, 0, 0x1000);
1507 DELAY(100000);
1508 nlge_mii_write_internal(sc->pcs_addr, phy, 0, 0x0200);
1509 DELAY(100000);
1510}
1511
1512static void
1513nlge_intr(void *arg)
1514{
1515 struct nlge_port_set *pset;
1516 struct nlge_softc *sc;
1517 struct nlge_softc *port_sc;
1518 xlr_reg_t *base;
1519 uint32_t intreg;
1520 uint32_t intr_status;
1521 int i;
1522
1523 sc = arg;
1524 if (sc == NULL) {
1525 printf("warning: No port registered for interrupt\n");
1526 return;
1527 }
1528 base = sc->base;
1529
1530 intreg = NLGE_READ(base, R_INTREG);
1531 if (intreg & (1 << O_INTREG__MDInt)) {
1532 pset = sc->mdio_pset;
1533 if (pset == NULL) {
1534 printf("warning: No ports for MDIO interrupt\n");
1535 return;
1536 }
1537 for (i = 0; i < pset->vec_sz; i++) {
1538 port_sc = pset->port_vec[i];
1539
1540 if (port_sc == NULL)
1541 continue;
1542
 1543			/* Ack phy interrupt - clear on read */
1544 intr_status = nlge_mii_read_internal(port_sc->mii_base,
1545 port_sc->phy_addr, 26);
1546 PDEBUG("Phy_%d: int_status=0x%08x\n", port_sc->phy_addr,
1547 intr_status);
1548
1549 if (!(intr_status & 0x8000)) {
1550 /* no interrupt for this port */
1551 continue;
1552 }
1553
1554 if (intr_status & 0x2410) {
1555 /* update link status for port */
1556 nlge_gmac_config_speed(port_sc, 0);
1557 } else {
1558 printf("%s: Unsupported phy interrupt"
1559 " (0x%08x)\n",
1560 device_get_nameunit(port_sc->nlge_dev),
1561 intr_status);
1562 }
1563 }
1564 }
1565
1566 /* Clear the NA interrupt */
1567 xlr_write_reg(base, R_INTREG, 0xffffffff);
1568
1569 return;
1570}
1571
1572static int
1573nlge_irq_init(struct nlge_softc *sc)
1574{
1575 struct resource irq_res;
1576 struct nlna_softc *na_sc;
1577 struct xlr_gmac_block_t *block_info;
1578 device_t na_dev;
1579 int ret;
1580 int irq_num;
1581
1582 na_dev = device_get_parent(sc->nlge_dev);
1583 block_info = device_get_ivars(na_dev);
1584
1585 irq_num = block_info->baseirq + sc->instance;
1586 irq_res.__r_i = (struct resource_i *)(intptr_t) (irq_num);
1587 ret = bus_setup_intr(sc->nlge_dev, &irq_res, (INTR_FAST |
1588 INTR_TYPE_NET | INTR_MPSAFE), NULL, nlge_intr, sc, NULL);
1589 if (ret) {
1590 nlge_detach(sc->nlge_dev);
1591 device_printf(sc->nlge_dev, "couldn't set up irq: error=%d\n",
1592 ret);
1593 return (ENXIO);
1594 }
1595 PDEBUG("Setup intr for dev=%s, irq=%d\n",
1596 device_get_nameunit(sc->nlge_dev), irq_num);
1597
1598 if (sc->instance == 0) {
1599 na_sc = device_get_softc(na_dev);
1600 sc->mdio_pset = &na_sc->mdio_set;
1601 }
1602 return (0);
1603}
1604
1605static void
1606nlge_irq_fini(struct nlge_softc *sc)
1607{
1608}
1609
1610static void
1611nlge_hw_init(struct nlge_softc *sc)
1612{
1613 struct xlr_gmac_port *port_info;
1614 xlr_reg_t *base;
1615
1616 base = sc->base;
1617 port_info = device_get_ivars(sc->nlge_dev);
1618 sc->tx_bucket_id = port_info->tx_bucket_id;
1619
1620 /* each packet buffer is 1536 bytes */
1621 NLGE_WRITE(base, R_DESC_PACK_CTRL,
1622 (1 << O_DESC_PACK_CTRL__MaxEntry) |
1623 (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));
1624 NLGE_WRITE(base, R_STATCTRL, ((1 << O_STATCTRL__Sten) |
1625 (1 << O_STATCTRL__ClrCnt)));
1626 NLGE_WRITE(base, R_L2ALLOCCTRL, 0xffffffff);
1627 NLGE_WRITE(base, R_INTMASK, 0);
1628 nlge_set_mac_addr(sc);
1629 nlge_media_specific_init(sc);
1630}
1631
1632static void
1633nlge_sc_init(struct nlge_softc *sc, device_t dev,
1634 struct xlr_gmac_port *port_info)
1635{
1636 memset(sc, 0, sizeof(*sc));
1637 sc->nlge_dev = dev;
1638 sc->id = device_get_unit(dev);
1639 nlge_set_port_attribs(sc, port_info);
1640}
1641
1642static void
1643nlge_media_specific_init(struct nlge_softc *sc)
1644{
1645 struct mii_data *media;
1646 struct bucket_size *bucket_sizes;
1647
1648 bucket_sizes = xlr_board_info.bucket_sizes;
1649 switch (sc->port_type) {
1650 case XLR_RGMII:
1651 case XLR_SGMII:
1652 case XLR_XAUI:
1653 NLGE_UPDATE(sc->base, R_DESC_PACK_CTRL,
1654 (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset),
1655 (W_DESC_PACK_CTRL__ByteOffset <<
1656 O_DESC_PACK_CTRL__ByteOffset));
1657 NLGE_WRITE(sc->base, R_GMAC_TX0_BUCKET_SIZE + sc->instance,
1658 bucket_sizes->bucket[sc->tx_bucket_id]);
1659 if (sc->port_type != XLR_XAUI) {
1660 nlge_gmac_config_speed(sc, 1);
1661 if (sc->mii_bus) {
1662 media = (struct mii_data *)device_get_softc(
1663 sc->mii_bus);
1664 }
1665 }
1666 break;
1667
1668 case XLR_XGMII:
1669 NLGE_WRITE(sc->base, R_BYTEOFFSET0, 0x2);
1670 NLGE_WRITE(sc->base, R_XGMACPADCALIBRATION, 0x30);
1671 NLGE_WRITE(sc->base, R_XGS_TX0_BUCKET_SIZE,
1672 bucket_sizes->bucket[sc->tx_bucket_id]);
1673 break;
1674 default:
1675 break;
1676 }
1677}
1678
1679/*
1680 * Read the MAC address from the XLR boot registers. All port addresses
1681 * are identical except for the lowest octet.
1682 */
1683static void
1684nlge_read_mac_addr(struct nlge_softc *sc)
1685{
1686 int i, j;
1687
1688 for (i = 0, j = 40; i < ETHER_ADDR_LEN && j >= 0; i++, j-= 8)
1689 sc->dev_addr[i] = (xlr_boot1_info.mac_addr >> j) & 0xff;
1690
1691 sc->dev_addr[i - 1] += sc->id; /* last octet is port-specific */
1692}
1693
1694/*
1695 * Write the MAC address to the XLR MAC port. Also, set the address
1696 * masks and MAC filter configuration.
1697 */
1698static void
1699nlge_set_mac_addr(struct nlge_softc *sc)
1700{
1701 NLGE_WRITE(sc->base, R_MAC_ADDR0,
1702 ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16) |
1703 (sc->dev_addr[3] << 8) | (sc->dev_addr[2])));
1704 NLGE_WRITE(sc->base, R_MAC_ADDR0 + 1,
1705 ((sc->dev_addr[1] << 24) | (sc-> dev_addr[0] << 16)));
1706
1707 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK2, 0xffffffff);
1708 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
1709 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK3, 0xffffffff);
1710 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
1711
1712 NLGE_WRITE(sc->base, R_MAC_FILTER_CONFIG,
1713 (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1714 (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1715 (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));
1716
1717 if (sc->port_type == XLR_RGMII || sc->port_type == XLR_SGMII) {
1718 NLGE_UPDATE(sc->base, R_IPG_IFG, MAC_B2B_IPG, 0x7f);
1719 }
1720}
1721
1722static int
1723nlge_if_init(struct nlge_softc *sc)
1724{
1725 struct ifnet *ifp;
1726 device_t dev;
1727 int error;
1728
1729 error = 0;
1730 dev = sc->nlge_dev;
1731 NLGE_LOCK_INIT(sc, device_get_nameunit(dev));
1732
1733 ifp = sc->nlge_if = if_alloc(IFT_ETHER);
1734 if (ifp == NULL) {
1735 device_printf(dev, "can not if_alloc()\n");
1736 error = ENOSPC;
1737 goto fail;
1738 }
1739 ifp->if_softc = sc;
1740 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1741 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1742 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;
1743 ifp->if_capenable = ifp->if_capabilities;
1744 ifp->if_ioctl = nlge_ioctl;
1745 ifp->if_start = nlge_start;
1746 ifp->if_init = nlge_init;
1747 ifp->if_hwassist = 0;
1748 ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1749 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1750 IFQ_SET_READY(&ifp->if_snd);
1751
1752 ifmedia_init(&sc->nlge_mii.mii_media, 0, nlge_mediachange,
1753 nlge_mediastatus);
1754 ifmedia_add(&sc->nlge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1755 ifmedia_set(&sc->nlge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1756 sc->nlge_mii.mii_media.ifm_media = sc->nlge_mii.mii_media.ifm_cur->ifm_media;
1757 nlge_read_mac_addr(sc);
1758
1759 ether_ifattach(ifp, sc->dev_addr);
1760
1761fail:
1762 return (error);
1763}
1764
1765static void
1766nlge_mii_init(device_t dev, struct nlge_softc *sc)
1767{
1768 int error;
1769
1770 if (sc->port_type != XLR_XAUI && sc->port_type != XLR_XGMII) {
1771 NLGE_WRITE(sc->mii_base, R_MII_MGMT_CONFIG, 0x07);
1772 }
1773 error = mii_phy_probe(dev, &sc->mii_bus, nlge_mediachange, nlge_mediastatus);
1774 if (error) {
1775 device_printf(dev, "no PHY device found\n");
1776 sc->mii_bus = NULL;
1777 }
1778 if (sc->mii_bus != NULL) {
1779 /*
1780 * Enable all MDIO interrupts in the phy. RX_ER bit seems to get
1781 * set about every 1 sec in GigE mode, ignore it for now...
1782 */
1783 nlge_mii_write_internal(sc->mii_base, sc->phy_addr, 25,
1784 0xfffffffe);
1785 }
1786}
1787
1788/*
1789 * Read a PHY register.
1790 *
1791 * Input parameters:
1792 * mii_base - Base address of MII
1793 * phyaddr - PHY's address
1794 * regidx - index of register to read
1795 *
1796 * Return value:
1797 * value read, or 0xffffffff if an error occurred.
1798 */
1799
1800static int
1801nlge_mii_read_internal(xlr_reg_t *mii_base, int phyaddr, int regidx)
1802{
1803 int i, val;
1804
1805 /* setup the phy reg to be used */
1806 NLGE_WRITE(mii_base, R_MII_MGMT_ADDRESS,
1807 (phyaddr << 8) | (regidx << 0));
1808 /* Issue the read command */
1809 NLGE_WRITE(mii_base, R_MII_MGMT_COMMAND,
1810 (1 << O_MII_MGMT_COMMAND__rstat));
1811
1812 /* poll for the read cycle to complete */
1813 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
1814 if (NLGE_READ(mii_base, R_MII_MGMT_INDICATORS) == 0)
1815 break;
1816 }
1817
1818 /* clear the read cycle */
1819 NLGE_WRITE(mii_base, R_MII_MGMT_COMMAND, 0);
1820
1821 if (i == PHY_STATUS_RETRIES) {
1822 return (0xffffffff);
1823 }
1824
1825 val = NLGE_READ(mii_base, R_MII_MGMT_STATUS);
1826
1827 return (val);
1828}
1829
1830/*
1831 * Write a value to a PHY register.
1832 *
1833 * Input parameters:
1834 * mii_base - Base address of MII
1835 * phyaddr - PHY to use
1836 * regidx - register within the PHY
1837 * regval - data to write to register
1838 *
1839 * Return value:
1840 * nothing
1841 */
1842static void
1843nlge_mii_write_internal(xlr_reg_t *mii_base, int phyaddr, int regidx,
1844 int regval)
1845{
1846 int i;
1847
1848 NLGE_WRITE(mii_base, R_MII_MGMT_ADDRESS,
1849 (phyaddr << 8) | (regidx << 0));
1850
1851 /* Write the data which starts the write cycle */
1852 NLGE_WRITE(mii_base, R_MII_MGMT_WRITE_DATA, regval);
1853
1854 /* poll for the write cycle to complete */
1855 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
1856 if (NLGE_READ(mii_base, R_MII_MGMT_INDICATORS) == 0)
1857 break;
1858 }
1859}
1860
1861/*
1862 * Function to optimize the use of p2d descriptors for the given PDU.
1863 * As it is on the fast-path (called during packet transmission), it
1864 * is described in more detail than the initialization functions.
1865 *
1866 * Input: mbuf chain (MC), pointer to fmn message
1867 * Input constraints: None
1868 * Output: FMN message to transmit the data in MC
1869 * Return values: 0 - success
1870 * 1 - MC cannot be handled (see Limitations below)
1871 * 2 - MC cannot be handled presently (maybe worth re-trying)
1872 * Other output: Number of entries filled in the FMN message
1873 *
1874 * Output structure/constraints:
1875 * 1. Max 3 p2d's + 1 zero-len (ZL) p2d with virtual address of MC.
1876 * 2. 3 p2d's + 1 p2p with max 14 p2d's (ZL p2d not required in this case).
1877 * 3. Each p2d points to physically contiguous chunk of data (subject to
1878 * entire MC requiring max 17 p2d's).
1879 * Limitations:
1880 * 1. MC's that require more than 17 p2d's are not handled.
1881 * Benefits: MC's that require <= 3 p2d's avoid the overhead of allocating
1882 * the p2p structure. Small packets (which typically give low
1883 * performance) are expected to have a small MC that takes
1884 * advantage of this.
1885 */
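/*
 * Descriptive note (inferred from how this file packs and unpacks the
 * FMN message words, not taken from hardware documentation): each
 * 64-bit p2d entry built below appears to carry the end-of-packet flag
 * in bit 63, the free-back station id starting at bit 54 (127 is used
 * for plain data fragments), the fragment length in bits 40-53 and the
 * physical address in the low 40 bits. nlge_msgring_handler() extracts
 * the same fields when the message comes back. A purely illustrative
 * sketch of composing one such entry (not part of the driver):
 *
 *	static __inline uint64_t
 *	nlge_make_p2d(uint64_t stid, uint64_t len, uint64_t paddr)
 *	{
 *		return ((stid << 54) | (len << 40) |
 *		    (paddr & 0xffffffffffULL));
 *	}
 */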
1886static int
1887prepare_fmn_message(struct nlge_softc *sc, struct msgrng_msg *fmn_msg,
1888 uint32_t *n_entries, struct mbuf *mbuf_chain, uint64_t fb_stn_id,
1889 struct nlge_tx_desc **tx_desc)
1890{
1891 struct mbuf *m;
1892 struct nlge_tx_desc *p2p;
1893 uint64_t *cur_p2d;
1894 vm_offset_t buf;
1895 vm_paddr_t paddr;
1896 int msg_sz, p2p_sz, is_p2p;
1897 int len, frag_sz;
1898 /* Num entries per FMN msg is 4 for XLR/XLS */
1899 const int FMN_SZ = sizeof(*fmn_msg) / sizeof(uint64_t);
1900
1901 msg_sz = p2p_sz = is_p2p = 0;
1902 p2p = NULL;
1903 cur_p2d = &fmn_msg->msg0;
1904
1905 for (m = mbuf_chain; m != NULL; m = m->m_next) {
1906 buf = (vm_offset_t) m->m_data;
1907 len = m->m_len;
1908
1909 while (len) {
1910 if (msg_sz == (FMN_SZ - 1)) {
1911 p2p = uma_zalloc(nl_tx_desc_zone, M_WAITOK);
1912 if (p2p == NULL)
1913 return 2;
1914 /*
1915 * As we currently use xlr_paddr_lw on a 32-bit
1916 * OS, both the pointers are laid out in one
1917 * 64-bit location - this makes it easy to
1918 * retrieve the pointers when processing the
1919 * tx free-back descriptor.
1920 */
1921 p2p->frag[XLR_MAX_TX_FRAGS] =
1922 (((uint64_t) (vm_offset_t) p2p) << 32) |
1923 ((vm_offset_t) mbuf_chain);
1924 cur_p2d = &p2p->frag[0];
1925 is_p2p = 1;
1926 } else if (msg_sz == (FMN_SZ - 1 + XLR_MAX_TX_FRAGS)) {
1927 uma_zfree(nl_tx_desc_zone, p2p);
1928 return 1;
1929 }
1930 paddr = vtophys(buf);
1931 frag_sz = PAGE_SIZE - (buf & PAGE_MASK);
1932 if (len < frag_sz)
1933 frag_sz = len;
1934 *cur_p2d++ = (127ULL << 54) | ((uint64_t)frag_sz << 40)
1935 | paddr;
1936 msg_sz++;
1937 if (is_p2p)
1938 p2p_sz++;
1939 len -= frag_sz;
1940 buf += frag_sz;
1941 }
1942 }
1943
1944 if (msg_sz > 0) {
1945 cur_p2d[-1] |= (1ULL << 63); /* set eop in most-recent p2d */
1946 } else {
1947 printf("Zero-length mbuf chain ??\n");
1948 *n_entries = msg_sz;
1949 return 0;
1950 }
1951
1952 *tx_desc = p2p;
1953
1954 if (is_p2p) {
1955 paddr = vtophys(p2p);
1956 fmn_msg->msg3 = (1ULL << 63) | (1ULL << 62) |
1957 ((uint64_t)fb_stn_id << 54) |
1958 ((uint64_t)(p2p_sz * 8) << 40) | paddr;
1959 *n_entries = FMN_SZ;
1960 } else {
1961 /* zero-len p2d */
1962 *cur_p2d = (1ULL << 63) | ((uint64_t)fb_stn_id << 54) |
1963 (vm_offset_t) mbuf_chain;
1964 *n_entries = msg_sz + 1;
1965 }
1966
1967 return (0);
1968}
1969
1970static int
1971send_fmn_msg_tx(struct nlge_softc *sc, struct msgrng_msg *msg,
1972 uint32_t n_entries)
1973{
1974 unsigned long mflags;
1975 int ret;
1976
1977 mflags = 0;
1978 msgrng_access_enable(mflags);
1979 ret = message_send_retry(n_entries, MSGRNG_CODE_MAC, sc->tx_bucket_id,
1980 msg);
1981 msgrng_access_disable(mflags);
1982 return (!ret);
1983}
1984
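/*
 * Note: despite the parameter name, the value handed to release_mbuf()
 * is the low bits of the free-back message, which for the non-p2p case
 * hold the virtual address of the mbuf chain that prepare_fmn_message()
 * stored in the zero-length p2d; hence the direct cast back to a
 * pointer below.
 */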
1985static void
1986release_mbuf(uint64_t phy_addr)
1987{
1988 struct mbuf *m;
1989
1990 m = (struct mbuf *)((uint32_t) phy_addr);
1991 m_freem(m);
1992}
1993
1994static void
1995release_tx_desc(struct msgrng_msg *msg)
1996{
1997 vm_paddr_t paddr;
1998 uint64_t temp;
1999 struct nlge_tx_desc *tx_desc;
2000 struct mbuf *m;
2001 uint32_t sr;
2002
2003 paddr = msg->msg0 & 0xffffffffffULL;
2004 paddr += (XLR_MAX_TX_FRAGS * sizeof(uint64_t));
2005 sr = xlr_enable_kx();
2006 temp = xlr_paddr_lw(paddr);
2007 tx_desc = (struct nlge_tx_desc*)((intptr_t) temp);
2008 paddr += sizeof(void *);
2009 temp = xlr_paddr_lw(paddr);
2010 mips_wr_status(sr);
2011 m = (struct mbuf *)((intptr_t) temp);
2012 m_freem(m);
2013
2014 uma_zfree(nl_tx_desc_zone, tx_desc);
2015}
2016
2017static struct mbuf *
2018get_mbuf(void)
2019{
2020 struct mbuf *m_new;
2021
2022 if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
2023 return NULL;
2024 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
2025 return (m_new);
2026}
2027
2028static void *
2029get_buf(void)
2030{
2031 struct mbuf *m_new;
2032 vm_paddr_t temp1, temp2;
2033 unsigned int *md;
2034
2035 m_new = get_mbuf();
2036 if (m_new == NULL)
2037 return m_new;
2038
2039 m_adj(m_new, XLR_CACHELINE_SIZE - ((unsigned int)m_new->m_data & 0x1f));
2040 md = (unsigned int *)m_new->m_data;
2041 md[0] = (unsigned int)m_new; /* Back Ptr */
2042 md[1] = 0xf00bad;
2043 m_adj(m_new, XLR_CACHELINE_SIZE);
2044
2045 temp1 = vtophys((vm_offset_t) m_new->m_data);
2046 temp2 = vtophys((vm_offset_t) m_new->m_data + 1536);
2047 if ((temp1 + 1536) != temp2)
2048 panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
2049
2050 return ((void *)m_new->m_data);
2051}
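/*
 * Layout note for buffers returned by get_buf(): the cluster is first
 * aligned to a cache line, then one full cache line in front of the
 * returned data pointer is reserved to hold a back-pointer to the mbuf
 * and the 0xf00bad magic word. nlge_rx() reads these two words back
 * from (paddr - XLR_CACHELINE_SIZE) to recover the mbuf and to check
 * that the descriptor really came from this driver.
 */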
2052
2053static int
2054nlge_gmac_config_speed(struct nlge_softc *sc, int quick)
2055{
2056 struct mii_data *md;
2057 xlr_reg_t *mmio;
2058 int bmsr, n_tries, max_tries;
2059 int core_ctl[] = { 0x2, 0x1, 0x0, 0x1 };
2060 int sgmii_speed[] = { SGMII_SPEED_10,
2061 SGMII_SPEED_100,
2062 SGMII_SPEED_1000,
2063 SGMII_SPEED_100 }; /* default to 100Mbps */
2064 char *speed_str[] = { "10",
2065 "100",
2066 "1000",
2067 "unknown, defaulting to 100" };
2068 int link_state = LINK_STATE_DOWN;
2069
2070 if (sc->port_type == XLR_XAUI || sc->port_type == XLR_XGMII)
2071 return 0;
2072
2073 md = NULL;
2074 mmio = sc->base;
2075 if (sc->mii_base != NULL) {
2076 max_tries = (quick == 1) ? 100 : 4000;
2077 bmsr = 0;
2078 for (n_tries = 0; n_tries < max_tries; n_tries++) {
2079 bmsr = nlge_mii_read_internal(sc->mii_base,
2080 sc->phy_addr, MII_BMSR);
2081 if ((bmsr & BMSR_ACOMP) && (bmsr & BMSR_LINK))
2082 break; /* Auto-negotiation is complete
2083 and link is up */
2084 DELAY(1000);
2085 }
2086 bmsr &= BMSR_LINK;
2087 sc->link = (bmsr == 0) ? xlr_mac_link_down : xlr_mac_link_up;
2088 sc->speed = nlge_mii_read_internal(sc->mii_base, sc->phy_addr, 28);
2089 sc->speed = (sc->speed >> 3) & 0x03;
2090 if (sc->link == xlr_mac_link_up) {
2091 link_state = LINK_STATE_UP;
2092 nlge_sgmii_init(sc);
2093 }
2094 if (sc->mii_bus)
2095 md = (struct mii_data *)device_get_softc(sc->mii_bus);
2096 }
2097
2098 if (sc->port_type != XLR_RGMII)
2099 NLGE_WRITE(mmio, R_INTERFACE_CONTROL, sgmii_speed[sc->speed]);
2100 if (sc->speed == xlr_mac_speed_10 || sc->speed == xlr_mac_speed_100 ||
2101 sc->speed == xlr_mac_speed_rsvd) {
2102 NLGE_WRITE(mmio, R_MAC_CONFIG_2, 0x7117);
2103 } else if (sc->speed == xlr_mac_speed_1000) {
2104 NLGE_WRITE(mmio, R_MAC_CONFIG_2, 0x7217);
2105 if (md != NULL) {
2106 ifmedia_set(&md->mii_media, IFM_MAKEWORD(IFM_ETHER,
2107 IFM_1000_T, IFM_FDX, md->mii_instance));
2108 }
2109 }
2110 NLGE_WRITE(mmio, R_CORECONTROL, core_ctl[sc->speed]);
2111 if_link_state_change(sc->nlge_if, link_state);
2112 printf("%s: [%sMbps]\n", device_get_nameunit(sc->nlge_dev),
2113 speed_str[sc->speed]);
2114
2115 return (0);
2116}
2117
2118/*
2119 * This function is called for each port that was added to the device tree
2120 * and it initializes the following port attributes:
2121 * - type
2122 * - base (base address to access port-specific registers)
2123 * - mii_base
2124 * - phy_addr
2125 */
2126static void
2127nlge_set_port_attribs(struct nlge_softc *sc,
2128 struct xlr_gmac_port *port_info)
2129{
2130 sc->instance = port_info->instance % 4; /* TBD: will not work for SPI-4 */
2131 sc->port_type = port_info->type;
2132 sc->base = (xlr_reg_t *) (port_info->base_addr +
2133 (uint32_t)DEFAULT_XLR_IO_BASE);
2134 sc->mii_base = (xlr_reg_t *) (port_info->mii_addr +
2135 (uint32_t)DEFAULT_XLR_IO_BASE);
2136 if (port_info->pcs_addr != 0)
2137 sc->pcs_addr = (xlr_reg_t *) (port_info->pcs_addr +
2138 (uint32_t)DEFAULT_XLR_IO_BASE);
2139 if (port_info->serdes_addr != 0)
2140 sc->serdes_addr = (xlr_reg_t *) (port_info->serdes_addr +
2141 (uint32_t)DEFAULT_XLR_IO_BASE);
2142 sc->phy_addr = port_info->phy_addr;
2143
2144 PDEBUG("Port%d: base=%p, mii_base=%p, phy_addr=%d\n", sc->id, sc->base,
2145 sc->mii_base, sc->phy_addr);
2146}
2147
2148/* ------------------------------------------------------------------------ */
2149
2150/* Debug dump functions */
2151
2152#ifdef DEBUG
2153
2154static void
2155dump_reg(xlr_reg_t *base, uint32_t offset, char *name)
2156{
2157 int val;
2158
2159 val = NLGE_READ(base, offset);
2160 printf("%-30s: 0x%8x 0x%8x\n", name, offset, val);
2161}
2162
2163#define STRINGIFY(x) #x
2164
2165static void
2166dump_na_registers(xlr_reg_t *base_addr, int port_id)
2167{
2168 PDEBUG("Register dump for NA (of port=%d)\n", port_id);
2169 dump_reg(base_addr, R_PARSERCONFIGREG, STRINGIFY(R_PARSERCONFIGREG));
2170 PDEBUG("Tx bucket sizes\n");
2171 dump_reg(base_addr, R_GMAC_JFR0_BUCKET_SIZE,
2172 STRINGIFY(R_GMAC_JFR0_BUCKET_SIZE));
2173 dump_reg(base_addr, R_GMAC_RFR0_BUCKET_SIZE,
2174 STRINGIFY(R_GMAC_RFR0_BUCKET_SIZE));
2175 dump_reg(base_addr, R_GMAC_TX0_BUCKET_SIZE,
2176 STRINGIFY(R_GMAC_TX0_BUCKET_SIZE));
2177 dump_reg(base_addr, R_GMAC_TX1_BUCKET_SIZE,
2178 STRINGIFY(R_GMAC_TX1_BUCKET_SIZE));
2179 dump_reg(base_addr, R_GMAC_TX2_BUCKET_SIZE,
2180 STRINGIFY(R_GMAC_TX2_BUCKET_SIZE));
2181 dump_reg(base_addr, R_GMAC_TX3_BUCKET_SIZE,
2182 STRINGIFY(R_GMAC_TX3_BUCKET_SIZE));
2183 dump_reg(base_addr, R_GMAC_JFR1_BUCKET_SIZE,
2184 STRINGIFY(R_GMAC_JFR1_BUCKET_SIZE));
2185 dump_reg(base_addr, R_GMAC_RFR1_BUCKET_SIZE,
2186 STRINGIFY(R_GMAC_RFR1_BUCKET_SIZE));
2187 dump_reg(base_addr, R_TXDATAFIFO0, STRINGIFY(R_TXDATAFIFO0));
2188 dump_reg(base_addr, R_TXDATAFIFO1, STRINGIFY(R_TXDATAFIFO1));
2189}
2190
2191static void
2192dump_gmac_registers(struct nlge_softc *sc)
2193{
2194 xlr_reg_t *base_addr = sc->base;
2195 int port_id = sc->instance;
2196
2197 PDEBUG("Register dump for port=%d\n", port_id);
2198 if (sc->port_type == XLR_RGMII || sc->port_type == XLR_SGMII) {
2199 dump_reg(base_addr, R_MAC_CONFIG_1, STRINGIFY(R_MAC_CONFIG_1));
2200 dump_reg(base_addr, R_MAC_CONFIG_2, STRINGIFY(R_MAC_CONFIG_2));
2201 dump_reg(base_addr, R_IPG_IFG, STRINGIFY(R_IPG_IFG));
2202 dump_reg(base_addr, R_HALF_DUPLEX, STRINGIFY(R_HALF_DUPLEX));
2203 dump_reg(base_addr, R_MAXIMUM_FRAME_LENGTH,
2204 STRINGIFY(R_MAXIMUM_FRAME_LENGTH));
2205 dump_reg(base_addr, R_TEST, STRINGIFY(R_TEST));
2206 dump_reg(base_addr, R_MII_MGMT_CONFIG,
2207 STRINGIFY(R_MII_MGMT_CONFIG));
2208 dump_reg(base_addr, R_MII_MGMT_COMMAND,
2209 STRINGIFY(R_MII_MGMT_COMMAND));
2210 dump_reg(base_addr, R_MII_MGMT_ADDRESS,
2211 STRINGIFY(R_MII_MGMT_ADDRESS));
2212 dump_reg(base_addr, R_MII_MGMT_WRITE_DATA,
2213 STRINGIFY(R_MII_MGMT_WRITE_DATA));
2214 dump_reg(base_addr, R_MII_MGMT_STATUS,
2215 STRINGIFY(R_MII_MGMT_STATUS));
2216 dump_reg(base_addr, R_MII_MGMT_INDICATORS,
2217 STRINGIFY(R_MII_MGMT_INDICATORS));
2218 dump_reg(base_addr, R_INTERFACE_CONTROL,
2219 STRINGIFY(R_INTERFACE_CONTROL));
2220 dump_reg(base_addr, R_INTERFACE_STATUS,
2221 STRINGIFY(R_INTERFACE_STATUS));
2222 } else if (sc->port_type == XLR_XAUI || sc->port_type == XLR_XGMII) {
2223 dump_reg(base_addr, R_XGMAC_CONFIG_0,
2224 STRINGIFY(R_XGMAC_CONFIG_0));
2225 dump_reg(base_addr, R_XGMAC_CONFIG_1,
2226 STRINGIFY(R_XGMAC_CONFIG_1));
2227 dump_reg(base_addr, R_XGMAC_CONFIG_2,
2228 STRINGIFY(R_XGMAC_CONFIG_2));
2229 dump_reg(base_addr, R_XGMAC_CONFIG_3,
2230 STRINGIFY(R_XGMAC_CONFIG_3));
2231 dump_reg(base_addr, R_XGMAC_STATION_ADDRESS_LS,
2232 STRINGIFY(R_XGMAC_STATION_ADDRESS_LS));
2233 dump_reg(base_addr, R_XGMAC_STATION_ADDRESS_MS,
2234 STRINGIFY(R_XGMAC_STATION_ADDRESS_MS));
2235 dump_reg(base_addr, R_XGMAC_MAX_FRAME_LEN,
2236 STRINGIFY(R_XGMAC_MAX_FRAME_LEN));
2237 dump_reg(base_addr, R_XGMAC_REV_LEVEL,
2238 STRINGIFY(R_XGMAC_REV_LEVEL));
2239 dump_reg(base_addr, R_XGMAC_MIIM_COMMAND,
2240 STRINGIFY(R_XGMAC_MIIM_COMMAND));
2241 dump_reg(base_addr, R_XGMAC_MIIM_FILED,
2242 STRINGIFY(R_XGMAC_MIIM_FILED));
2243 dump_reg(base_addr, R_XGMAC_MIIM_CONFIG,
2244 STRINGIFY(R_XGMAC_MIIM_CONFIG));
2245 dump_reg(base_addr, R_XGMAC_MIIM_LINK_FAIL_VECTOR,
2246 STRINGIFY(R_XGMAC_MIIM_LINK_FAIL_VECTOR));
2247 dump_reg(base_addr, R_XGMAC_MIIM_INDICATOR,
2248 STRINGIFY(R_XGMAC_MIIM_INDICATOR));
2249 }
2250
2251 dump_reg(base_addr, R_MAC_ADDR0, STRINGIFY(R_MAC_ADDR0));
2252 dump_reg(base_addr, R_MAC_ADDR0 + 1, STRINGIFY(R_MAC_ADDR0+1));
2253 dump_reg(base_addr, R_MAC_ADDR1, STRINGIFY(R_MAC_ADDR1));
2254 dump_reg(base_addr, R_MAC_ADDR2, STRINGIFY(R_MAC_ADDR2));
2255 dump_reg(base_addr, R_MAC_ADDR3, STRINGIFY(R_MAC_ADDR3));
2256 dump_reg(base_addr, R_MAC_ADDR_MASK2, STRINGIFY(R_MAC_ADDR_MASK2));
2257 dump_reg(base_addr, R_MAC_ADDR_MASK3, STRINGIFY(R_MAC_ADDR_MASK3));
2258 dump_reg(base_addr, R_MAC_FILTER_CONFIG, STRINGIFY(R_MAC_FILTER_CONFIG));
2259 dump_reg(base_addr, R_TX_CONTROL, STRINGIFY(R_TX_CONTROL));
2260 dump_reg(base_addr, R_RX_CONTROL, STRINGIFY(R_RX_CONTROL));
2261 dump_reg(base_addr, R_DESC_PACK_CTRL, STRINGIFY(R_DESC_PACK_CTRL));
2262 dump_reg(base_addr, R_STATCTRL, STRINGIFY(R_STATCTRL));
2263 dump_reg(base_addr, R_L2ALLOCCTRL, STRINGIFY(R_L2ALLOCCTRL));
2264 dump_reg(base_addr, R_INTMASK, STRINGIFY(R_INTMASK));
2265 dump_reg(base_addr, R_INTREG, STRINGIFY(R_INTREG));
2266 dump_reg(base_addr, R_TXRETRY, STRINGIFY(R_TXRETRY));
2267 dump_reg(base_addr, R_CORECONTROL, STRINGIFY(R_CORECONTROL));
2268 dump_reg(base_addr, R_BYTEOFFSET0, STRINGIFY(R_BYTEOFFSET0));
2269 dump_reg(base_addr, R_BYTEOFFSET1, STRINGIFY(R_BYTEOFFSET1));
2270 dump_reg(base_addr, R_L2TYPE_0, STRINGIFY(R_L2TYPE_0));
2271 dump_na_registers(base_addr, port_id);
2272}
2273
2274static void
2275dump_fmn_cpu_credits_for_gmac(struct xlr_board_info *board, int gmac_id)
2276{
2277 struct stn_cc *cc;
2278 int gmac_bucket_ids[] = { 97, 98, 99, 100, 101, 103 };
2279 int j, k, r, c;
2280 int n_gmac_buckets;
2281
2282 n_gmac_buckets = sizeof (gmac_bucket_ids) / sizeof (gmac_bucket_ids[0]);
2283 for (j = 0; j < 8; j++) { // for each cpu
2284 cc = board->credit_configs[j];
2285 printf("Credits for Station CPU_%d ---> GMAC buckets (tx path)\n", j);
2286 for (k = 0; k < n_gmac_buckets; k++) {
2287 r = gmac_bucket_ids[k] / 8;
2288 c = gmac_bucket_ids[k] % 8;
2289 printf (" --> gmac%d_bucket_%-3d: credits=%d\n", gmac_id,
2290 gmac_bucket_ids[k], cc->counters[r][c]);
2291 }
2292 }
2293}
2294
2295static void
2296dump_fmn_gmac_credits(struct xlr_board_info *board, int gmac_id)
2297{
2298 struct stn_cc *cc;
2299 int j, k;
2300
2301 cc = board->gmac_block[gmac_id].credit_config;
2302 printf("Credits for Station: GMAC_%d ---> CPU buckets (rx path)\n", gmac_id);
2303 for (j = 0; j < 8; j++) { // for each cpu
2304 printf(" ---> cpu_%d\n", j);
2305 for (k = 0; k < 8; k++) { // for each bucket in cpu
2306 printf(" ---> bucket_%d: credits=%d\n", j * 8 + k,
2307 cc->counters[j][k]);
2308 }
2309 }
2310}
2311
2312static void
2313dump_board_info(struct xlr_board_info *board)
2314{
2315 struct xlr_gmac_block_t *gm;
2316 int i, k;
2317
2318 printf("cpu=%x ", xlr_revision());
2319 printf("board_version: major=%llx, minor=%llx\n",
2320 xlr_boot1_info.board_major_version,
2321 xlr_boot1_info.board_minor_version);
2322 printf("is_xls=%d, nr_cpus=%d, usb=%s, cfi=%s, ata=%s\npci_irq=%d, "
2323 "gmac_ports=%d\n", board->is_xls, board->nr_cpus,
2324 board->usb ? "Yes" : "No", board->cfi ? "Yes": "No",
2325 board->ata ? "Yes" : "No", board->pci_irq, board->gmacports);
2326 printf("FMN: Core-station bucket sizes\n");
2327 for (i = 0; i < 128; i++) {
2328 if (i && ((i % 16) == 0))
2329 printf("\n");
2330 printf ("b[%d] = %d ", i, board->bucket_sizes->bucket[i]);
2331 }
2332 printf("\n");
2333 for (i = 0; i < 3; i++) {
2334 gm = &board->gmac_block[i];
2335 printf("RNA_%d: type=%d, enabled=%s, mode=%d, station_id=%d, "
2336 "station_txbase=%d, station_rfr=%d ", i, gm->type,
2337 gm->enabled ? "Yes" : "No", gm->mode, gm->station_id,
2338 gm->station_txbase, gm->station_rfr);
2339 printf("n_ports=%d, baseaddr=%p, baseirq=%d, baseinst=%d\n",
2340 gm->num_ports, (xlr_reg_t *)gm->baseaddr, gm->baseirq,
2341 gm->baseinst);
2342 }
2343 for (k = 0; k < 3; k++) { // for each NA
2344 dump_fmn_cpu_credits_for_gmac(board, k);
2345 dump_fmn_gmac_credits(board, k);
2346 }
2347}
2348
2349static void
2350dump_mac_stats(struct nlge_softc *sc)
2351{
2352 xlr_reg_t *addr;
2353 uint32_t pkts_tx, pkts_rx;
2354
2355 addr = sc->base;
2356 pkts_rx = NLGE_READ(sc->base, R_RPKT);
2357 pkts_tx = NLGE_READ(sc->base, R_TPKT);
2358
2359 printf("[nlge_%d mac stats]: pkts_tx=%u, pkts_rx=%u\n", sc->id, pkts_tx,
2360 pkts_rx);
2361 if (pkts_rx > 0) {
2362 uint32_t r;
2363
2364 /* dump all rx counters. we need this because pkts_rx includes
2365 bad packets. */
2366 for (r = R_RFCS; r <= R_ROVR; r++)
2367 printf("[nlge_%d mac stats]: [0x%x]=%u\n", sc->id, r,
2368 NLGE_READ(sc->base, r));
2369 }
2370 if (pkts_tx > 0) {
2371 uint32_t r;
2372
2373 /* dump all tx counters. might be useful for debugging. */
2374 for (r = R_TMCA; r <= R_TFRG; r++) {
2375 if ((r == (R_TNCL + 1)) || (r == (R_TNCL + 2)))
2376 continue;
2377 printf("[nlge_%d mac stats]: [0x%x]=%u\n", sc->id, r,
2378 NLGE_READ(sc->base, r));
2379 }
2380 }
2381
2382}
2383
2384static void
2385dump_mii_regs(struct nlge_softc *sc)
2386{
2387 uint32_t mii_regs[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
2388 0x8, 0x9, 0xa, 0xf, 0x10, 0x11, 0x12, 0x13,
2389 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
2390 0x1c, 0x1d, 0x1e};
2391 int i, n_regs;
2392
2393 if (sc->mii_base == NULL || sc->mii_bus == NULL)
2394 return;
2395
2396 n_regs = sizeof (mii_regs) / sizeof (mii_regs[0]);
2397 for (i = 0; i < n_regs; i++) {
2398 printf("[mii_0x%x] = %x\n", mii_regs[i],
2399 nlge_mii_read_internal(sc->mii_base, sc->phy_addr,
2400 mii_regs[i]));
2401 }
2402}
2403
2404static void
2405dump_ifmedia(struct ifmedia *ifm)
2406{
2407 printf("ifm_mask=%08x, ifm_media=%08x, cur=%p\n", ifm->ifm_mask,
2408 ifm->ifm_media, ifm->ifm_cur);
2409 if (ifm->ifm_cur != NULL) {
2410 printf("Cur attribs: ifmedia_entry.ifm_media=%08x,"
2411 " ifmedia_entry.ifm_data=%08x\n", ifm->ifm_cur->ifm_media,
2412 ifm->ifm_cur->ifm_data);
2413 }
2414}
2415
2416static void
2417dump_mii_data(struct mii_data *mii)
2418{
2419 dump_ifmedia(&mii->mii_media);
2420 printf("ifp=%p, mii_instance=%d, mii_media_status=%08x,"
2421 " mii_media_active=%08x\n", mii->mii_ifp, mii->mii_instance,
2422 mii->mii_media_status, mii->mii_media_active);
2423}
2424
2425static void
2426dump_pcs_regs(struct nlge_softc *sc, int phy)
2427{
2428 int i, val;
2429
2430 printf("PCS regs from %p for phy=%d\n", sc->pcs_addr, phy);
2431 for (i = 0; i < 18; i++) {
2432 if (i == 2 || i == 3 || (i >= 9 && i <= 14))
2433 continue;
2434 val = nlge_mii_read_internal(sc->pcs_addr, phy, i);
2435 printf("PHY:%d pcs[0x%x] is 0x%x\n", phy, i, val);
2436 }
2437}
2438#endif
105#include <mips/rmi/pic.h>
106#include <mips/rmi/board.h>
107#include <mips/rmi/rmi_mips_exts.h>
108#include <mips/rmi/rmi_boot_info.h>
109#include <mips/rmi/dev/xlr/atx_cpld.h>
110#include <mips/rmi/dev/xlr/xgmac_mdio.h>
111
112#include <dev/mii/mii.h>
113#include <dev/mii/miivar.h>
114#include "miidevs.h"
115#include <dev/mii/brgphyreg.h>
116#include "miibus_if.h"
117
118#include <mips/rmi/dev/nlge/if_nlge.h>
119
120MODULE_DEPEND(nlna, nlge, 1, 1, 1);
121MODULE_DEPEND(nlge, ether, 1, 1, 1);
122MODULE_DEPEND(nlge, miibus, 1, 1, 1);
123
124/* Network accelerator entry points */
125static int nlna_probe(device_t);
126static int nlna_attach(device_t);
127static int nlna_detach(device_t);
128static int nlna_suspend(device_t);
129static int nlna_resume(device_t);
130static int nlna_shutdown(device_t);
131
132/* GMAC port entry points */
133static int nlge_probe(device_t);
134static int nlge_attach(device_t);
135static int nlge_detach(device_t);
136static int nlge_suspend(device_t);
137static int nlge_resume(device_t);
138static void nlge_init(void *);
139static int nlge_ioctl(struct ifnet *, u_long, caddr_t);
140static void nlge_start(struct ifnet *);
141static void nlge_rx(struct nlge_softc *sc, vm_paddr_t paddr, int len);
142
143static int nlge_mii_write(struct device *, int, int, int);
144static int nlge_mii_read(struct device *, int, int);
145static void nlge_mac_mii_statchg(device_t);
146static int nlge_mediachange(struct ifnet *ifp);
147static void nlge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
148
149/* Other internal/helper functions */
150static void *get_buf(void);
151static struct mbuf *get_mbuf(void);
152
153static void nlna_add_to_port_set(struct nlge_port_set *pset,
154 struct nlge_softc *sc);
155static void nlna_config_pde(struct nlna_softc *);
156static void nlna_config_parser(struct nlna_softc *);
157static void nlna_config_classifier(struct nlna_softc *);
158static void nlna_config_fifo_spill_area(struct nlna_softc *sc);
159static void nlna_config_common(struct nlna_softc *);
160static void nlna_disable_ports(struct nlna_softc *sc);
161static void nlna_enable_intr(struct nlna_softc *sc);
162static void nlna_disable_intr(struct nlna_softc *sc);
163static void nlna_enable_ports(struct nlna_softc *sc);
164static void nlna_get_all_softc(device_t iodi_dev,
165 struct nlna_softc **sc_vec, uint32_t vec_sz);
166static void nlna_hw_init(struct nlna_softc *sc);
167static int nlna_is_last_active_na(struct nlna_softc *sc);
168static void nlna_media_specific_config(struct nlna_softc *sc);
169static void nlna_reset_ports(struct nlna_softc *sc,
170 struct xlr_gmac_block_t *blk);
171static struct nlna_softc *nlna_sc_init(device_t dev,
172 struct xlr_gmac_block_t *blk);
173static __inline__ int nlna_send_free_desc(struct nlna_softc *nlna,
174 vm_paddr_t addr);
175static void nlna_setup_intr(struct nlna_softc *sc);
176static void nlna_smp_update_pde(void *dummy __unused);
177static void nlna_submit_rx_free_desc(struct nlna_softc *sc,
178 uint32_t n_desc);
179
180static int nlge_gmac_config_speed(struct nlge_softc *, int quick);
181static void nlge_hw_init(struct nlge_softc *sc);
182static int nlge_if_init(struct nlge_softc *sc);
183static void nlge_intr(void *arg);
184static int nlge_irq_init(struct nlge_softc *sc);
185static void nlge_irq_fini(struct nlge_softc *sc);
186static void nlge_media_specific_init(struct nlge_softc *sc);
187static void nlge_mii_init(device_t dev, struct nlge_softc *sc);
188static int nlge_mii_read_internal(xlr_reg_t *mii_base, int phyaddr,
189 int regidx);
190static void nlge_mii_write_internal(xlr_reg_t *mii_base, int phyaddr,
191 int regidx, int regval);
192void nlge_msgring_handler(int bucket, int size, int code,
193 int stid, struct msgrng_msg *msg, void *data);
194static void nlge_port_disable(int id, xlr_reg_t *base, int port_type);
195static void nlge_port_enable(struct nlge_softc *sc);
196static void nlge_read_mac_addr(struct nlge_softc *sc);
197static void nlge_sc_init(struct nlge_softc *sc, device_t dev,
198 struct xlr_gmac_port *port_info);
199static void nlge_set_mac_addr(struct nlge_softc *sc);
200static void nlge_set_port_attribs(struct nlge_softc *,
201 struct xlr_gmac_port *);
202static void nlge_sgmii_init(struct nlge_softc *sc);
203static void nlge_start_locked(struct ifnet *ifp, struct nlge_softc *sc);
204
205static int prepare_fmn_message(struct nlge_softc *sc,
206 struct msgrng_msg *msg, uint32_t *n_entries, struct mbuf *m_head,
207 uint64_t fr_stid, struct nlge_tx_desc **tx_desc);
208
209static void release_mbuf(uint64_t phy_addr);
210static void release_tx_desc(struct msgrng_msg *msg);
211static int send_fmn_msg_tx(struct nlge_softc *, struct msgrng_msg *,
212 uint32_t n_entries);
213
214//#define DEBUG
215#ifdef DEBUG
216static int mac_debug = 1;
217static int reg_dump = 0;
218#undef PDEBUG
219#define PDEBUG(fmt, args...) \
220 do {\
221 if (mac_debug) {\
222 printf("[%s@%d|%s]: cpu_%d: " fmt, \
223 __FILE__, __LINE__, __FUNCTION__, PCPU_GET(cpuid), ##args);\
224 }\
225 } while(0);
226
227/* Debug/dump functions */
228static void dump_reg(xlr_reg_t *addr, uint32_t offset, char *name);
229static void dump_gmac_registers(struct nlge_softc *);
230static void dump_na_registers(xlr_reg_t *base, int port_id);
231static void dump_mac_stats(struct nlge_softc *sc);
232static void dump_mii_regs(struct nlge_softc *sc) __attribute__((used));
233static void dump_mii_data(struct mii_data *mii) __attribute__((used));
234static void dump_board_info(struct xlr_board_info *);
235static void dump_pcs_regs(struct nlge_softc *sc, int phy);
236
237#else
238#undef PDEBUG
239#define PDEBUG(fmt, args...)
240#define dump_reg(a, o, n) /* nop */
241#define dump_gmac_registers(a) /* nop */
242#define dump_na_registers(a, p) /* nop */
243#define dump_board_info(b) /* nop */
244#define dump_mac_stats(sc) /* nop */
245#define dump_mii_regs(sc) /* nop */
246#define dump_mii_data(mii) /* nop */
247#define dump_pcs_regs(sc, phy) /* nop */
248#endif
249
250/* Wrappers etc. to export the driver entry points. */
251static device_method_t nlna_methods[] = {
252 /* Device interface */
253 DEVMETHOD(device_probe, nlna_probe),
254 DEVMETHOD(device_attach, nlna_attach),
255 DEVMETHOD(device_detach, nlna_detach),
256 DEVMETHOD(device_shutdown, nlna_shutdown),
257 DEVMETHOD(device_suspend, nlna_suspend),
258 DEVMETHOD(device_resume, nlna_resume),
259
260 /* bus interface : TBD : what are these for ? */
261 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
262 DEVMETHOD(bus_print_child, bus_generic_print_child),
263 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
264
265 { 0, 0 }
266};
267
268static driver_t nlna_driver = {
269 "nlna",
270 nlna_methods,
271 sizeof(struct nlna_softc)
272};
273
274static devclass_t nlna_devclass;
275
276static device_method_t nlge_methods[] = {
277 /* Device interface */
278 DEVMETHOD(device_probe, nlge_probe),
279 DEVMETHOD(device_attach, nlge_attach),
280 DEVMETHOD(device_detach, nlge_detach),
281 DEVMETHOD(device_shutdown, bus_generic_shutdown),
282 DEVMETHOD(device_suspend, nlge_suspend),
283 DEVMETHOD(device_resume, nlge_resume),
284
285 /* MII interface */
286 DEVMETHOD(miibus_readreg, nlge_mii_read),
287 DEVMETHOD(miibus_writereg, nlge_mii_write),
288 DEVMETHOD(miibus_statchg, nlge_mac_mii_statchg),
289
290 {0, 0}
291};
292
293static driver_t nlge_driver = {
294 "nlge",
295 nlge_methods,
296 sizeof(struct nlge_softc)
297};
298
299static devclass_t nlge_devclass;
300
301DRIVER_MODULE(nlna, iodi, nlna_driver, nlna_devclass, 0, 0);
302DRIVER_MODULE(nlge, nlna, nlge_driver, nlge_devclass, 0, 0);
303DRIVER_MODULE(miibus, nlge, miibus_driver, miibus_devclass, 0, 0);
304
305static uma_zone_t nl_tx_desc_zone;
306
307/* Function to atomically increment an integer with the given value. */
308static __inline__ unsigned int
309ldadd_wu(unsigned int value, unsigned long *addr)
310{
311 __asm__ __volatile__( ".set push\n"
312 ".set noreorder\n"
313 "move $8, %2\n"
314 "move $9, %3\n"
315 /* "ldaddwu $8, $9\n" */
316 ".word 0x71280011\n"
317 "move %0, $8\n"
318 ".set pop\n"
319 : "=&r"(value), "+m"(*addr)
320 : "0"(value), "r" ((unsigned long)addr)
321 : "$8", "$9");
322 return value;
323}
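/*
 * Note on the inline assembly above: the ".word 0x71280011" is the
 * pre-encoded form of the XLR-specific "ldaddwu $8, $9" instruction
 * shown in the adjacent comment; it is emitted as a raw word,
 * presumably because generic assemblers do not recognize this vendor
 * opcode.
 */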
324
325static __inline__ uint32_t
326xlr_enable_kx(void)
327{
328 uint32_t sr = mips_rd_status();
329
330 mips_wr_status((sr & ~MIPS_SR_INT_IE) | MIPS_SR_KX);
331 return sr;
332}
333
334static int
335nlna_probe(device_t dev)
336{
337 return (BUS_PROBE_DEFAULT);
338}
339
340/*
341 * Add all attached GMAC/XGMAC ports to the device tree. Port
342 * configuration is spread in two regions - common configuration
343 * for all ports in the NA and per-port configuration in MAC-specific
344 * region. This function does the following:
345 * - add the ports to the device tree
346 * - reset the ports
347 * - do all the common initialization
348 * - invoke bus_generic_attach for per-port configuration
349 * - supply initial free rx descriptors to ports
350 * - initialize s/w data structures
351 * - finally, enable interrupts (only in the last NA).
352 *
353 * For reference, sample address space for common and per-port
354 * registers is given below.
355 *
356 * The address map for RNA0 is: (typical value)
357 *
358 * XLR_IO_BASE +--------------------------------------+ 0xbef0_0000
359 * | |
360 * | |
361 * | |
362 * | |
363 * | |
364 * | |
365 * GMAC0 ---> +--------------------------------------+ 0xbef0_c000
366 * | |
367 * | |
368 * (common) -> |......................................| 0xbef0_c400
369 * | |
370 * | (RGMII/SGMII: common registers) |
371 * | |
372 * GMAC1 ---> |--------------------------------------| 0xbef0_d000
373 * | |
374 * | |
375 * (common) -> |......................................| 0xbef0_d400
376 * | |
377 * | (RGMII/SGMII: common registers) |
378 * | |
379 * |......................................|
380 * and so on ....
381 *
382 * Ref: Figure 14-3 and Table 14-1 of XLR PRM
383 */
384static int
385nlna_attach(device_t dev)
386{
387 struct xlr_gmac_block_t *block_info;
388 device_t gmac_dev;
389 struct nlna_softc *sc;
390 int error;
391 int i;
392 int id;
393
394 id = device_get_unit(dev);
395 block_info = device_get_ivars(dev);
396 if (!block_info->enabled) {
397 return 0;
398 }
399
400#ifdef DEBUG
401 dump_board_info(&xlr_board_info);
402#endif
403 block_info->baseaddr += DEFAULT_XLR_IO_BASE;
404
405 /* Initialize nlna state in softc structure */
406 sc = nlna_sc_init(dev, block_info);
407
408 /* Add device's for the ports controlled by this NA. */
409 if (block_info->type == XLR_GMAC) {
410 KASSERT(id < 2, ("No GMACs supported with this network "
411 "accelerator: %d", id));
412 for (i = 0; i < sc->num_ports; i++) {
413 gmac_dev = device_add_child(dev, "nlge", -1);
414 device_set_ivars(gmac_dev, &block_info->gmac_port[i]);
415 }
416 } else if (block_info->type == XLR_XGMAC) {
417 KASSERT(id > 0 && id <= 2, ("No XGMACs supported with this "
418 "network accelerator: %d", id));
419 gmac_dev = device_add_child(dev, "nlge", -1);
420 device_set_ivars(gmac_dev, &block_info->gmac_port[0]);
421 } else if (block_info->type == XLR_SPI4) {
422 /* SPI4 is not supported here */
423 device_printf(dev, "Unsupported: NA with SPI4 type\n");
424 return (ENOTSUP);
425 }
426
427 nlna_reset_ports(sc, block_info);
428
429 /* Initialize Network Accelerator registers. */
430 nlna_hw_init(sc);
431
432 error = bus_generic_attach(dev);
433 if (error) {
434 device_printf(dev, "failed to attach port(s)\n");
435 goto fail;
436 }
437
438 /* Send out the initial pool of free-descriptors for the rx path */
439 nlna_submit_rx_free_desc(sc, MAX_FRIN_SPILL);
440
441 /* S/w data structure initializations shared by all NA's. */
442 if (nl_tx_desc_zone == NULL) {
443 /* Create a zone for allocating tx descriptors */
444 nl_tx_desc_zone = uma_zcreate("NL Tx Desc",
445 sizeof(struct nlge_tx_desc), NULL, NULL, NULL, NULL,
446 XLR_CACHELINE_SIZE, 0);
447 }
448
449 /* Enable NA interrupts */
450 nlna_setup_intr(sc);
451
452 return (0);
453
454fail:
455 return (error);
456}
457
458static int
459nlna_detach(device_t dev)
460{
461 struct nlna_softc *sc;
462
463 sc = device_get_softc(dev);
464 if (device_is_alive(dev)) {
465 nlna_disable_intr(sc);
466 /* This will make sure that per-port detach is complete
467 * and all traffic on the ports has been stopped. */
468 bus_generic_detach(dev);
469 uma_zdestroy(nl_tx_desc_zone);
470 }
471
472 return (0);
473}
474
475static int
476nlna_suspend(device_t dev)
477{
478
479 return (0);
480}
481
482static int
483nlna_resume(device_t dev)
484{
485
486 return (0);
487}
488
489static int
490nlna_shutdown(device_t dev)
491{
492 return (0);
493}
494
495
496/* GMAC port entry points */
497static int
498nlge_probe(device_t dev)
499{
500 struct nlge_softc *sc;
501 struct xlr_gmac_port *port_info;
502 int index;
503 char *desc[] = { "RGMII", "SGMII", "RGMII/SGMII", "XGMAC", "XAUI",
504 "Unknown"};
505
506 port_info = device_get_ivars(dev);
507 index = (port_info->type < XLR_RGMII || port_info->type > XLR_XAUI) ?
508 5 : port_info->type;
509 device_set_desc_copy(dev, desc[index]);
510
511 sc = device_get_softc(dev);
512 nlge_sc_init(sc, dev, port_info);
513
514 nlge_port_disable(sc->id, sc->base, sc->port_type);
515
516 return (0);
517}
518
519static int
520nlge_attach(device_t dev)
521{
522 struct nlge_softc *sc;
523 struct nlna_softc *nsc;
524 int error;
525
526 sc = device_get_softc(dev);
527
528 nlge_if_init(sc);
529 nlge_mii_init(dev, sc);
530 error = nlge_irq_init(sc);
531 if (error)
532 return error;
533 nlge_hw_init(sc);
534
535 nsc = (struct nlna_softc *)device_get_softc(device_get_parent(dev));
536 nsc->child_sc[sc->instance] = sc;
537
538 return (0);
539}
540
541static int
542nlge_detach(device_t dev)
543{
544 struct nlge_softc *sc;
545 struct ifnet *ifp;
546
547 sc = device_get_softc(dev);
548 ifp = sc->nlge_if;
549
550 if (device_is_attached(dev)) {
551 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
552 nlge_port_disable(sc->id, sc->base, sc->port_type);
553 nlge_irq_fini(sc);
554 ether_ifdetach(ifp);
555 bus_generic_detach(dev);
556 }
557 if (ifp)
558 if_free(ifp);
559
560 return (0);
561}
562
563static int
564nlge_suspend(device_t dev)
565{
566 return (0);
567}
568
569static int
570nlge_resume(device_t dev)
571{
572 return (0);
573}
574
575static void
576nlge_init(void *addr)
577{
578 struct nlge_softc *sc;
579 struct ifnet *ifp;
580
581 sc = (struct nlge_softc *)addr;
582 ifp = sc->nlge_if;
583
584 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
585 return;
586
587 nlge_gmac_config_speed(sc, 0);
588 ifp->if_drv_flags |= IFF_DRV_RUNNING;
589 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
590 nlge_port_enable(sc);
591
592 if (sc->port_type == XLR_SGMII) {
593 dump_pcs_regs(sc, 27);
594 }
595 dump_gmac_registers(sc);
596 dump_mac_stats(sc);
597}
598
599static int
600nlge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
601{
602 struct mii_data *mii;
603 struct nlge_softc *sc;
604 struct ifreq *ifr;
605 int error;
606
607 sc = ifp->if_softc;
608 error = 0;
609 ifr = (struct ifreq *)data;
610 switch(command) {
611 case SIOCSIFFLAGS:
612 break;
613 case SIOCSIFMEDIA:
614 case SIOCGIFMEDIA:
615 if (sc->mii_bus != NULL) {
616 mii = (struct mii_data *)device_get_softc(sc->mii_bus);
617 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
618 command);
619 }
620 break;
621 case SIOCSIFADDR:
622 // intentional fall thru
623 case SIOCSIFMTU:
624 default:
625 error = ether_ioctl(ifp, command, data);
626 break;
627 }
628
629 return (error);
630}
631
632/* This function is called from an interrupt handler */
633void
634nlge_msgring_handler(int bucket, int size, int code, int stid,
635 struct msgrng_msg *msg, void *data)
636{
637 struct nlna_softc *na_sc;
638 struct nlge_softc *sc;
639 struct ifnet *ifp;
640 uint64_t phys_addr;
641 unsigned long addr;
642 uint32_t length;
643 int ctrl;
644 int cpu;
645 int tx_error;
646 int port;
647 int vcpu;
648 int is_p2p;
649
650 cpu = xlr_core_id();
651 vcpu = (cpu << 2) + xlr_thr_id();
652
653 addr = 0;
654 is_p2p = 0;
655 tx_error = 0;
656 length = (msg->msg0 >> 40) & 0x3fff;
657 na_sc = (struct nlna_softc *)data;
658 phys_addr = (uint64_t) (msg->msg0 & 0xffffffffe0ULL);
659 if (length == 0) {
660 ctrl = CTRL_REG_FREE;
661 port = (msg->msg0 >> 54) & 0x0f;
662 is_p2p = (msg->msg0 >> 62) & 0x1;
663 tx_error = (msg->msg0 >> 58) & 0xf;
664 } else {
665 ctrl = CTRL_SNGL;
666 length = length - BYTE_OFFSET - MAC_CRC_LEN;
667 port = msg->msg0 & 0x0f;
668 }
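	/*
	 * At this point a zero length field has marked the message as a
	 * transmit free-back (descriptor returned by the MAC once the
	 * packet has gone out); any other value is a received packet
	 * whose length has been trimmed of the front pad and the CRC.
	 */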
669
670 sc = na_sc->child_sc[port];
671 if (sc == NULL) {
672 printf("Message (of %d len) with softc=NULL on %d port (type=%s)\n",
673 length, port, (ctrl == CTRL_SNGL ? "Pkt rx" :
674 "Freeback for tx packet"));
675 return;
676 }
677
678 if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
679 if (is_p2p)
680 release_tx_desc(msg);
681 else {
682 release_mbuf(msg->msg0 & 0xffffffffffULL);
683 }
684 ifp = sc->nlge_if;
685 if (ifp->if_drv_flags & IFF_DRV_OACTIVE){
686 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
687 }
688 ldadd_wu(1, (tx_error) ? &ifp->if_oerrors: &ifp->if_opackets);
689 } else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
690 /* Rx Packet */
691
692 nlge_rx(sc, phys_addr, length);
693 nlna_submit_rx_free_desc(na_sc, 1); /* return free descr to NA */
694 } else {
695 printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
696 }
697
698}
699
700static void
701nlge_start(struct ifnet *ifp)
702{
703 struct nlge_softc *sc;
704
705 sc = ifp->if_softc;
706 //NLGE_LOCK(sc);
707 nlge_start_locked(ifp, sc);
708 //NLGE_UNLOCK(sc);
709}
710
711static void
712nlge_start_locked(struct ifnet *ifp, struct nlge_softc *sc)
713{
714 struct msgrng_msg msg;
715 struct mbuf *m;
716 struct nlge_tx_desc *tx_desc;
717 uint64_t fr_stid;
718 uint32_t cpu;
719 uint32_t n_entries;
720 uint32_t tid;
721 int ret;
722 int sent;
723
724 cpu = xlr_core_id();
725 tid = xlr_thr_id();
726 fr_stid = cpu * 8 + tid + 4;
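	/*
	 * Descriptive note (inferred from the arithmetic above): the
	 * free-back station id selects bucket (tid + 4) within the
	 * current core's eight FMN buckets as the destination for
	 * transmit free-back messages.
	 */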
727
728 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
729 return;
730 }
731
732 do {
733 /* Grab a packet off the queue. */
734 IF_DEQUEUE(&ifp->if_snd, m);
735 if (m == NULL) {
736 return;
737 }
738
739 tx_desc = NULL;
740 ret = prepare_fmn_message(sc, &msg, &n_entries, m, fr_stid, &tx_desc);
741 if (ret) {
742 goto fail;
743 }
744 sent = send_fmn_msg_tx(sc, &msg, n_entries);
745 if (!sent) {
746 goto fail;
747 }
748 } while(1);
749
750 return;
751
752fail:
753 if (tx_desc != NULL) {
754 uma_zfree(nl_tx_desc_zone, tx_desc);
755 }
756 if (m != NULL) {
757 /*
758 * TBD: It is observed that only when both of the statements
759 * below are not enabled, traffic continues till the end.
760 * Otherwise, the port locks up in the middle and never
761 * recovers from it. The current theory for this behavior
762 * is that the queue is full and the upper layer is neither
763 * able to add to it nor invoke nlge_start to drain the
764 * queue. The driver may have to do something in addition
765 * to resetting the OACTIVE bit when a transmit free-back
766 * is received.
767 */
768 //ifp->if_drv_flags |= IFF_DRV_OACTIVE;
769 //IF_PREPEND(&ifp->if_snd, m);
770 m_freem(m);
771 ldadd_wu(1, &ifp->if_iqdrops);
772 }
773 return;
774}
775
776static void
777nlge_rx(struct nlge_softc *sc, vm_paddr_t paddr, int len)
778{
779 struct ifnet *ifp;
780 struct mbuf *m;
781 uint32_t tm, mag, sr;
782
783 sr = xlr_enable_kx();
784 tm = xlr_paddr_lw(paddr - XLR_CACHELINE_SIZE);
785 mag = xlr_paddr_lw(paddr - XLR_CACHELINE_SIZE + sizeof(uint32_t));
786 mips_wr_status(sr);
787
788 m = (struct mbuf *)(intptr_t)tm;
789 if (mag != 0xf00bad) {
790 /* somebody else's packet. Error - FIXME in initialization */
791 printf("cpu %d: *ERROR* Not my packet paddr %llx\n",
792 xlr_core_id(), (uint64_t) paddr);
793 return;
794 }
795
796 ifp = sc->nlge_if;
797 /* align the data */
798 m->m_data += BYTE_OFFSET;
799 m->m_pkthdr.len = m->m_len = len;
800 m->m_pkthdr.rcvif = ifp;
801
802 ldadd_wu(1, &ifp->if_ipackets);
803 (*ifp->if_input)(ifp, m);
804}
805
806static int
807nlge_mii_write(struct device *dev, int phyaddr, int regidx, int regval)
808{
809 struct nlge_softc *sc;
810
811 sc = device_get_softc(dev);
812 if (sc->phy_addr == phyaddr && sc->port_type != XLR_XGMII)
813 nlge_mii_write_internal(sc->mii_base, phyaddr, regidx, regval);
814
815 return (0);
816}
817
818static int
819nlge_mii_read(struct device *dev, int phyaddr, int regidx)
820{
821 struct nlge_softc *sc;
822 int val;
823
824 sc = device_get_softc(dev);
825 val = (sc->phy_addr != phyaddr && sc->port_type != XLR_XGMII) ? (0xffff) :
826 nlge_mii_read_internal(sc->mii_base, phyaddr, regidx);
827
828 return (val);
829}
830
831static void
832nlge_mac_mii_statchg(device_t dev)
833{
834}
835
836static int
837nlge_mediachange(struct ifnet *ifp)
838{
839 return 0;
840}
841
842static void
843nlge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
844{
845 struct nlge_softc *sc;
846 struct mii_data *md;
847
848 md = NULL;
849 sc = ifp->if_softc;
850 if (sc->mii_bus)
851 md = device_get_softc(sc->mii_bus);
852
853 ifmr->ifm_status = IFM_AVALID;
854 ifmr->ifm_active = IFM_ETHER;
855
856 if (sc->link == xlr_mac_link_down)
857 return;
858
859 if (md != NULL)
860 ifmr->ifm_active = md->mii_media.ifm_cur->ifm_media;
861 ifmr->ifm_status |= IFM_ACTIVE;
862}
863
864static struct nlna_softc *
865nlna_sc_init(device_t dev, struct xlr_gmac_block_t *blk)
866{
867 struct nlna_softc *sc;
868
869 sc = device_get_softc(dev);
870 memset(sc, 0, sizeof(*sc));
871 sc->nlna_dev = dev;
872 sc->base = (xlr_reg_t *) blk->baseaddr;
873 sc->rfrbucket = blk->station_rfr;
874 sc->station_id = blk->station_id;
875 sc->na_type = blk->type;
876 sc->mac_type = blk->mode;
877 sc->num_ports = blk->num_ports;
878
879 sc->mdio_set.port_vec = sc->mdio_sc;
880 sc->mdio_set.vec_sz = XLR_MAX_MACS;
881
882 return (sc);
883}
884
885/*
886 * Do:
887 * - Initialize common GMAC registers (index range 0x100-0x3ff).
888 */
889static void
890nlna_hw_init(struct nlna_softc *sc)
891{
892
893 /*
894 * It is seen that this is a critical function in bringing up FreeBSD.
895 * When it is not invoked, FreeBSD panics and fails during the
896 * multi-processor init (SI_SUB_SMP of mi_startup). The key function
897 * in this sequence seems to be platform_prep_smp_launch. */
898 if (register_msgring_handler(sc->station_id, nlge_msgring_handler, sc)) {
899 panic("Couldn't register msgring handler\n");
900 }
901 nlna_config_fifo_spill_area(sc);
902 nlna_config_pde(sc);
903 nlna_config_common(sc);
904 nlna_config_parser(sc);
905 nlna_config_classifier(sc);
906}
907
908/*
909 * Enable interrupts on all the ports controlled by this NA. For now, we
910 * only care about the MII interrupt and this has to be enabled only
911 * on the port id0.
912 *
913 * This function is not in-sync with the regular way of doing things - it
914 * executes only in the context of the last active network accelerator (and
915 * thereby has some ugly accesses in the device tree). Though inelegant, it
916 * is necessary to do it this way as the per-port interrupts can be
917 * setup/enabled only after all the network accelerators have been
918 * initialized.
919 */
920static void
921nlna_setup_intr(struct nlna_softc *sc)
922{
923 struct nlna_softc *na_sc[XLR_MAX_NLNA];
924 struct nlge_port_set *pset;
925 struct xlr_gmac_port *port_info;
926 device_t iodi_dev;
927 int i, j;
928
929 if (!nlna_is_last_active_na(sc))
930 return ;
931
932 /* Collect all nlna softc pointers */
933 memset(na_sc, 0, sizeof(*na_sc) * XLR_MAX_NLNA);
934 iodi_dev = device_get_parent(sc->nlna_dev);
935 nlna_get_all_softc(iodi_dev, na_sc, XLR_MAX_NLNA);
936
937 /* Setup the MDIO interrupt lists. */
938 /*
939 * MDIO interrupts are coarse - a single interrupt line provides
940 * information about one of many possible ports. To figure out the
941 * exact port on which action is to be taken, all of the ports
942 * linked to an MDIO interrupt should be read. To enable this,
943 * ports need to add themselves to port sets.
944 */
945 for (i = 0; i < XLR_MAX_NLNA; i++) {
946 if (na_sc[i] == NULL)
947 continue;
948 for (j = 0; j < na_sc[i]->num_ports; j++) {
949 /* processing j-th port on i-th NA */
950 port_info = device_get_ivars(
951 na_sc[i]->child_sc[j]->nlge_dev);
952 pset = &na_sc[port_info->mdint_id]->mdio_set;
953 nlna_add_to_port_set(pset, na_sc[i]->child_sc[j]);
954 }
955 }
956
957 /* Enable interrupts */
958 for (i = 0; i < XLR_MAX_NLNA; i++) {
959 if (na_sc[i] != NULL && na_sc[i]->na_type != XLR_XGMAC) {
960 nlna_enable_intr(na_sc[i]);
961 }
962 }
963}
964
965static void
966nlna_add_to_port_set(struct nlge_port_set *pset, struct nlge_softc *sc)
967{
968 int i;
969
970 /* step past the non-NULL elements */
971 for (i = 0; i < pset->vec_sz && pset->port_vec[i] != NULL; i++) ;
972 if (i < pset->vec_sz)
973 pset->port_vec[i] = sc;
974 else
975 printf("warning: internal error: out-of-bounds for MDIO array\n");
976}
977
978static void
979nlna_enable_intr(struct nlna_softc *sc)
980{
981 int i;
982
983 for (i = 0; i < sc->num_ports; i++) {
984 if (sc->child_sc[i]->instance == 0)
985 NLGE_WRITE(sc->child_sc[i]->base, R_INTMASK,
986 (1 << O_INTMASK__MDInt));
987 }
988}
989
990static void
991nlna_disable_intr(struct nlna_softc *sc)
992{
993 int i;
994
995 for (i = 0; i < sc->num_ports; i++) {
996 if (sc->child_sc[i]->instance == 0)
997 NLGE_WRITE(sc->child_sc[i]->base, R_INTMASK, 0);
998 }
999}
1000
1001static int
1002nlna_is_last_active_na(struct nlna_softc *sc)
1003{
1004 int id;
1005
1006 id = device_get_unit(sc->nlna_dev);
1007 return (id == 2 || xlr_board_info.gmac_block[id + 1].enabled == 0);
1008}
1009
1010static __inline__ int
1011nlna_send_free_desc(struct nlna_softc *sc, vm_paddr_t addr)
1012{
1013 struct msgrng_msg msg;
1014 int stid;
1015 int code;
1016 int i;
1017
1018 stid = sc->rfrbucket;
1019 memset(&msg, 0, sizeof(msg));
1020 msg.msg0 = (uint64_t) addr & 0xffffffffe0ULL;
1021
1022 code = (sc->na_type == XLR_XGMAC) ? MSGRNG_CODE_XGMAC : MSGRNG_CODE_MAC;
1023 for (i = 0; i < MAX_MSG_SND_ATTEMPTS; i++) {
1024 if (message_send(1, code, stid, &msg) == 0)
1025 return (0);
1026 }
1027 printf("Error: failed to send free desc to station %d\n", stid);
1028 return (1);
1029}
1030
1031static void
1032nlna_submit_rx_free_desc(struct nlna_softc *sc, uint32_t n_desc)
1033{
1034 void *ptr;
1035 unsigned long msgrng_flags;
1036 int i;
1037 int ret;
1038
1039 if (n_desc > 1) {
1040 PDEBUG("Sending %d free-in descriptors to station=%d\n", n_desc,
1041 sc->rfrbucket);
1042 }
1043
1044 for (i = 0; i < n_desc; i++) {
1045 ptr = get_buf();
1046 if (!ptr) {
1047 ret = -ENOMEM;
1048 device_printf(sc->nlna_dev, "Cannot allocate mbuf\n");
1049 break;
1050 }
1051
1052 /* Send the free Rx desc to the MAC */
1053 msgrng_access_enable(msgrng_flags);
1054 ret = nlna_send_free_desc(sc, vtophys(ptr));
1055 msgrng_access_disable(msgrng_flags);
1056 if (ret) /* no point trying other descriptors after
1057 a failure. */
1058 break;
1059 }
1060}
1061
1062static __inline__ void *
1063nlna_config_spill(xlr_reg_t *base, int reg_start_0, int reg_start_1,
1064 int reg_size, int size)
1065{
1066 void *spill;
1067 uint64_t phys_addr;
1068 uint32_t spill_size;
1069
1070 spill_size = size;
1071 spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
1072 M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
1073 if (spill == NULL || ((vm_offset_t) spill & (XLR_CACHELINE_SIZE - 1))) {
1074 panic("Unable to allocate memory for spill area!\n");
1075 }
1076 phys_addr = vtophys(spill);
1077 PDEBUG("Allocated spill %d bytes at %llx\n", size, phys_addr);
1078 NLGE_WRITE(base, reg_start_0, (phys_addr >> 5) & 0xffffffff);
1079 NLGE_WRITE(base, reg_start_1, (phys_addr >> 37) & 0x07);
1080 NLGE_WRITE(base, reg_size, spill_size);
1081
1082 return (spill);
1083}
1084
1085/*
1086 * Configure the 6 FIFOs that are used by the network accelerator to
1087 * communicate with the rest of the XLx device. 4 of the FIFOs are for
1088 * packets from NA --> cpu (called Class FIFOs) and 2 are for feeding
1089 * the NA with free descriptors.
1090 */
1091static void
1092nlna_config_fifo_spill_area(struct nlna_softc *sc)
1093{
1094 sc->frin_spill = nlna_config_spill(sc->base,
1095 R_REG_FRIN_SPILL_MEM_START_0,
1096 R_REG_FRIN_SPILL_MEM_START_1,
1097 R_REG_FRIN_SPILL_MEM_SIZE,
1098 MAX_FRIN_SPILL *
1099 sizeof(struct fr_desc));
1100 sc->frout_spill = nlna_config_spill(sc->base,
1101 R_FROUT_SPILL_MEM_START_0,
1102 R_FROUT_SPILL_MEM_START_1,
1103 R_FROUT_SPILL_MEM_SIZE,
1104 MAX_FROUT_SPILL *
1105 sizeof(struct fr_desc));
1106 sc->class_0_spill = nlna_config_spill(sc->base,
1107 R_CLASS0_SPILL_MEM_START_0,
1108 R_CLASS0_SPILL_MEM_START_1,
1109 R_CLASS0_SPILL_MEM_SIZE,
1110 MAX_CLASS_0_SPILL *
1111 sizeof(union rx_tx_desc));
1112 sc->class_1_spill = nlna_config_spill(sc->base,
1113 R_CLASS1_SPILL_MEM_START_0,
1114 R_CLASS1_SPILL_MEM_START_1,
1115 R_CLASS1_SPILL_MEM_SIZE,
1116 MAX_CLASS_1_SPILL *
1117 sizeof(union rx_tx_desc));
1118 sc->class_2_spill = nlna_config_spill(sc->base,
1119 R_CLASS2_SPILL_MEM_START_0,
1120 R_CLASS2_SPILL_MEM_START_1,
1121 R_CLASS2_SPILL_MEM_SIZE,
1122 MAX_CLASS_2_SPILL *
1123 sizeof(union rx_tx_desc));
1124 sc->class_3_spill = nlna_config_spill(sc->base,
1125 R_CLASS3_SPILL_MEM_START_0,
1126 R_CLASS3_SPILL_MEM_START_1,
1127 R_CLASS3_SPILL_MEM_SIZE,
1128 MAX_CLASS_3_SPILL *
1129 sizeof(union rx_tx_desc));
1130}
1131
1132/* Set the CPU buckets that receive packets from the NA class FIFOs. */
1133static void
1134nlna_config_pde(struct nlna_softc *sc)
1135{
1136 uint64_t bucket_map;
1137 uint32_t cpumask;
1138 int i, cpu, bucket;
1139
1140 cpumask = 0x1;
1141#ifdef SMP
1142 /*
1143 * This may be called before SMP start in a BOOTP/NFSROOT
1144 * setup. We will distribute packets to other cpus only when
1145 * SMP is started.
1146 */
1147 if (smp_started)
1148 cpumask = xlr_hw_thread_mask;
1149#endif
1150
1151 bucket_map = 0;
1152 for (i = 0; i < 32; i++) {
1153 if (cpumask & (1 << i)) {
1154 cpu = i;
1155 bucket = ((cpu >> 2) << 3);
1156 bucket_map |= (1ULL << bucket);
1157 }
1158 }
1159 NLGE_WRITE(sc->base, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
1160 NLGE_WRITE(sc->base, R_PDE_CLASS_0 + 1, ((bucket_map >> 32) & 0xffffffff));
1161
1162 NLGE_WRITE(sc->base, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
1163 NLGE_WRITE(sc->base, R_PDE_CLASS_1 + 1, ((bucket_map >> 32) & 0xffffffff));
1164
1165 NLGE_WRITE(sc->base, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
1166 NLGE_WRITE(sc->base, R_PDE_CLASS_2 + 1, ((bucket_map >> 32) & 0xffffffff));
1167
1168 NLGE_WRITE(sc->base, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
1169 NLGE_WRITE(sc->base, R_PDE_CLASS_3 + 1, ((bucket_map >> 32) & 0xffffffff));
1170}
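/*
 * Bucket mapping used above: hardware thread i belongs to core (i >> 2)
 * and the distribution bitmap points every class at bucket (core * 8),
 * i.e. the first FMN bucket of each core present in the cpumask.
 */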
1171
1172static void
1173nlna_smp_update_pde(void *dummy __unused)
1174{
1175 device_t iodi_dev;
1176 struct nlna_softc *na_sc[XLR_MAX_NLNA];
1177 int i;
1178
1179 printf("Updating packet distribution for SMP\n");
1180
1181 iodi_dev = devclass_get_device(devclass_find("iodi"), 0);
1182 nlna_get_all_softc(iodi_dev, na_sc, XLR_MAX_NLNA);
1183
1184 for (i = 0; i < XLR_MAX_NLNA; i++) {
1185 if (na_sc[i] == NULL)
1186 continue;
1187 nlna_disable_ports(na_sc[i]);
1188 nlna_config_pde(na_sc[i]);
1189 nlna_enable_ports(na_sc[i]);
1190 }
1191}
1192
1193SYSINIT(nlna_smp_update_pde, SI_SUB_SMP, SI_ORDER_ANY, nlna_smp_update_pde,
1194 NULL);
1195
1196static void
1197nlna_config_parser(struct nlna_softc *sc)
1198{
1199 /*
1200 * Mark it as no classification. The parser extract is guaranteed to
1201 * be zero with no classification.
1202 */
1203 NLGE_WRITE(sc->base, R_L2TYPE_0, 0x00);
1204 NLGE_WRITE(sc->base, R_L2TYPE_0, 0x01);
1205
1206 /* configure the parser : L2 Type is configured in the bootloader */
1207 /* extract IP: src, dest protocol */
1208 NLGE_WRITE(sc->base, R_L3CTABLE,
1209 (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
1210 (0x0800 << 0));
1211 NLGE_WRITE(sc->base, R_L3CTABLE + 1,
1212 (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));
1213}
1214
1215static void
1216nlna_config_classifier(struct nlna_softc *sc)
1217{
1218 int i;
1219
1220 if (sc->mac_type == XLR_XGMII) { /* TBD: XGMII init sequence */
1221 /* xgmac translation table doesn't have sane values on reset */
1222 for (i = 0; i < 64; i++)
1223 NLGE_WRITE(sc->base, R_TRANSLATETABLE + i, 0x0);
1224
1225 /*
1226 * use upper 7 bits of the parser extract to index the
1227 * translate table
1228 */
1229 NLGE_WRITE(sc->base, R_PARSERCONFIGREG, 0x0);
1230 }
1231}
1232
1233/*
1234 * Complete a bunch of h/w register initializations that are common for all the
1235 * ports controlled by a NA.
1236 */
1237static void
1238nlna_config_common(struct nlna_softc *sc)
1239{
1240 struct xlr_gmac_block_t *block_info;
1241 struct stn_cc *gmac_cc_config;
1242 int i, id;
1243
1244 block_info = device_get_ivars(sc->nlna_dev);
1245
1246 id = device_get_unit(sc->nlna_dev);
1247 gmac_cc_config = block_info->credit_config;
1248 for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1249 NLGE_WRITE(sc->base, R_CC_CPU0_0 + i,
1250 gmac_cc_config->counters[i >> 3][i & 0x07]);
1251 }
1252
1253 NLGE_WRITE(sc->base, R_MSG_TX_THRESHOLD, 3);
1254
1255 NLGE_WRITE(sc->base, R_DMACR0, 0xffffffff);
1256 NLGE_WRITE(sc->base, R_DMACR1, 0xffffffff);
1257 NLGE_WRITE(sc->base, R_DMACR2, 0xffffffff);
1258 NLGE_WRITE(sc->base, R_DMACR3, 0xffffffff);
1259 NLGE_WRITE(sc->base, R_FREEQCARVE, 0);
1260
1261 nlna_media_specific_config(sc);
1262}
1263
1264static void
1265nlna_media_specific_config(struct nlna_softc *sc)
1266{
1267 struct bucket_size *bucket_sizes;
1268
1269 bucket_sizes = xlr_board_info.bucket_sizes;
1270 switch (sc->mac_type) {
1271 case XLR_RGMII:
1272 case XLR_SGMII:
1273 case XLR_XAUI:
1274 NLGE_WRITE(sc->base, R_GMAC_JFR0_BUCKET_SIZE,
1275 bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
1276 NLGE_WRITE(sc->base, R_GMAC_RFR0_BUCKET_SIZE,
1277 bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
1278 NLGE_WRITE(sc->base, R_GMAC_JFR1_BUCKET_SIZE,
1279 bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
1280 NLGE_WRITE(sc->base, R_GMAC_RFR1_BUCKET_SIZE,
1281 bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);
1282
1283 if (sc->mac_type == XLR_XAUI) {
1284 NLGE_WRITE(sc->base, R_TXDATAFIFO0, (224 << 16));
1285 }
1286 break;
1287
1288 case XLR_XGMII:
1289 NLGE_WRITE(sc->base, R_XGS_RFR_BUCKET_SIZE,
1290 bucket_sizes->bucket[sc->rfrbucket]);
1291
1292 default:
1293 break;
1294 }
1295}
1296
1297static void
1298nlna_reset_ports(struct nlna_softc *sc, struct xlr_gmac_block_t *blk)
1299{
1300 xlr_reg_t *addr;
1301 int i;
1302 uint32_t rx_ctrl;
1303
1304 /* Refer Section 13.9.3 in the PRM for the reset sequence */
1305
1306 for (i = 0; i < sc->num_ports; i++) {
1307 uint32_t base = (uint32_t)DEFAULT_XLR_IO_BASE;
1308
1309 base += blk->gmac_port[i].base_addr;
1310 addr = (xlr_reg_t *) base;
1311
1312 /* 1. Reset RxEnable in MAC_CONFIG */
1313 switch (sc->mac_type) {
1314 case XLR_RGMII:
1315 case XLR_SGMII:
1316 NLGE_UPDATE(addr, R_MAC_CONFIG_1, 0,
1317 (1 << O_MAC_CONFIG_1__rxen));
1318 break;
1319 case XLR_XAUI:
1320 case XLR_XGMII:
1321 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1322 (1 << O_RX_CONTROL__RxEnable));
1323 break;
1324 default:
1325 printf("Error: Unsupported port_type=%d\n",
1326 sc->mac_type);
1327 }
1328
1329 /* 1.1 Wait for RxControl.RxHalt to be set */
1330 do {
1331 rx_ctrl = NLGE_READ(addr, R_RX_CONTROL);
1332 } while (!(rx_ctrl & 0x2));
1333
1334 /* 2. Set the soft reset bit in RxControl */
1335 NLGE_UPDATE(addr, R_RX_CONTROL, (1 << O_RX_CONTROL__SoftReset),
1336 (1 << O_RX_CONTROL__SoftReset));
1337
1338 /* 2.1 Wait for RxControl.SoftResetDone to be set */
1339 do {
1340 rx_ctrl = NLGE_READ(addr, R_RX_CONTROL);
1341 } while (!(rx_ctrl & 0x8));
1342
1343 /* 3. Clear the soft reset bit in RxControl */
1344 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1345 (1 << O_RX_CONTROL__SoftReset));
1346
1347 /* Turn off tx/rx on the port. */
1348 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1349 (1 << O_RX_CONTROL__RxEnable));
1350 NLGE_UPDATE(addr, R_TX_CONTROL, 0,
1351 (1 << O_TX_CONTROL__TxEnable));
1352 }
1353}
1354
1355static void
1356nlna_disable_ports(struct nlna_softc *sc)
1357{
1358 struct xlr_gmac_block_t *blk;
1359 xlr_reg_t *addr;
1360 int i;
1361
1362 blk = device_get_ivars(sc->nlna_dev);
1363 for (i = 0; i < sc->num_ports; i++) {
1364 uint32_t base = (uint32_t)DEFAULT_XLR_IO_BASE;
1365
1366 base += blk->gmac_port[i].base_addr;
1367 addr = (xlr_reg_t *) base;
1368 nlge_port_disable(i, addr, blk->gmac_port[i].type);
1369 }
1370}
1371
1372static void
1373nlna_enable_ports(struct nlna_softc *sc)
1374{
1375 device_t nlge_dev, *devlist;
1376 struct nlge_softc *port_sc;
1377 int i, numdevs;
1378
1379 device_get_children(sc->nlna_dev, &devlist, &numdevs);
1380 for (i = 0; i < numdevs; i++) {
1381 nlge_dev = devlist[i];
1382 if (nlge_dev == NULL)
1383 continue;
1384 port_sc = device_get_softc(nlge_dev);
1385 if (port_sc->nlge_if->if_drv_flags & IFF_DRV_RUNNING)
1386 nlge_port_enable(port_sc);
1387 }
1388 free(devlist, M_TEMP);
1389}
1390
1391static void
1392nlna_get_all_softc(device_t iodi_dev, struct nlna_softc **sc_vec,
1393 uint32_t vec_sz)
1394{
1395 device_t na_dev;
1396 int i;
1397
1398 for (i = 0; i < vec_sz; i++) {
1399 sc_vec[i] = NULL;
1400 na_dev = device_find_child(iodi_dev, "nlna", i);
1401 if (na_dev != NULL)
1402 sc_vec[i] = device_get_softc(na_dev);
1403 }
1404}
1405
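/*
 * Quiesce one port: stop the receiver and wait for RxHalt, stop the
 * transmitter and wait for TxIdle, then clear the MAC-level rx/tx enables
 * (MAC_CONFIG_1 for RGMII/SGMII, XGMAC_CONFIG_1 for XAUI/XGMII).
 */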
1406static void
1407nlge_port_disable(int id, xlr_reg_t *base, int port_type)
1408{
1409 uint32_t rd;
1410
1411 NLGE_UPDATE(base, R_RX_CONTROL, 0x0, 1 << O_RX_CONTROL__RxEnable);
1412 do {
1413 rd = NLGE_READ(base, R_RX_CONTROL);
1414 } while (!(rd & (1 << O_RX_CONTROL__RxHalt)));
1415
1416 NLGE_UPDATE(base, R_TX_CONTROL, 0, 1 << O_TX_CONTROL__TxEnable);
1417 do {
1418 rd = NLGE_READ(base, R_TX_CONTROL);
1419 } while (!(rd & (1 << O_TX_CONTROL__TxIdle)));
1420
1421 switch (port_type) {
1422 case XLR_RGMII:
1423 case XLR_SGMII:
1424 NLGE_UPDATE(base, R_MAC_CONFIG_1, 0,
1425 ((1 << O_MAC_CONFIG_1__rxen) |
1426 (1 << O_MAC_CONFIG_1__txen)));
1427 break;
1428 case XLR_XGMII:
1429 case XLR_XAUI:
1430 NLGE_UPDATE(base, R_XGMAC_CONFIG_1, 0,
1431 ((1 << O_XGMAC_CONFIG_1__hsttfen) |
1432 (1 << O_XGMAC_CONFIG_1__hstrfen)));
1433 break;
1434 default:
1435 panic("Unknown MAC type on port %d\n", id);
1436 }
1437}
1438
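/*
 * Undo nlge_port_disable(): on XLS boards an RGMII port first has its RGMII
 * mode bit set in RX_CONTROL, then the RX/TX control enables (along with the
 * TX byte threshold) and finally the MAC-level enables are turned back on.
 */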
1439static void
1440nlge_port_enable(struct nlge_softc *sc)
1441{
1442 struct xlr_gmac_port *self;
1443 xlr_reg_t *base;
1444
1445 base = sc->base;
1446 self = device_get_ivars(sc->nlge_dev);
1447 if (xlr_board_info.is_xls && sc->port_type == XLR_RGMII)
1448 NLGE_UPDATE(base, R_RX_CONTROL, (1 << O_RX_CONTROL__RGMII),
1449 (1 << O_RX_CONTROL__RGMII));
1450
1451 NLGE_UPDATE(base, R_RX_CONTROL, (1 << O_RX_CONTROL__RxEnable),
1452 (1 << O_RX_CONTROL__RxEnable));
1453 NLGE_UPDATE(base, R_TX_CONTROL,
1454 (1 << O_TX_CONTROL__TxEnable | RGE_TX_THRESHOLD_BYTES),
1455 (1 << O_TX_CONTROL__TxEnable | 0x3fff));
1456 switch (sc->port_type) {
1457 case XLR_RGMII:
1458 case XLR_SGMII:
1459 NLGE_UPDATE(base, R_MAC_CONFIG_1,
1460 ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen)),
1461 ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen)));
1462 break;
1463 case XLR_XGMII:
1464 case XLR_XAUI:
1465 NLGE_UPDATE(base, R_XGMAC_CONFIG_1,
1466 ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen)),
1467 ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen)));
1468 break;
1469 default:
1470 panic("Unknown MAC type on port %d\n", sc->id);
1471 }
1472}
1473
1474static void
1475nlge_sgmii_init(struct nlge_softc *sc)
1476{
1477 xlr_reg_t *mmio_gpio;
1478 int i;
1479 int phy;
1480
1481 if (sc->port_type != XLR_SGMII)
1482 return;
1483
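	/*
	 * Bring up the SGMII SerDes through its internal MII interface
	 * (device address 26).  The values written below are opaque
	 * initialization constants; like the autonegotiation writes further
	 * down, they are undocumented "magic" presumably taken from the
	 * vendor initialization sequence.
	 */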
1484 nlge_mii_write_internal(sc->serdes_addr, 26, 0, 0x6DB0);
1485 nlge_mii_write_internal(sc->serdes_addr, 26, 1, 0xFFFF);
1486 nlge_mii_write_internal(sc->serdes_addr, 26, 2, 0xB6D0);
1487 nlge_mii_write_internal(sc->serdes_addr, 26, 3, 0x00FF);
1488 nlge_mii_write_internal(sc->serdes_addr, 26, 4, 0x0000);
1489 nlge_mii_write_internal(sc->serdes_addr, 26, 5, 0x0000);
1490 nlge_mii_write_internal(sc->serdes_addr, 26, 6, 0x0005);
1491 nlge_mii_write_internal(sc->serdes_addr, 26, 7, 0x0001);
1492 nlge_mii_write_internal(sc->serdes_addr, 26, 8, 0x0000);
1493 nlge_mii_write_internal(sc->serdes_addr, 26, 9, 0x0000);
1494 nlge_mii_write_internal(sc->serdes_addr, 26,10, 0x0000);
1495
1496	for (i = 0; i < 10000000; i++) {} /* delay */
1497 /* program GPIO values for serdes init parameters */
1498 mmio_gpio = (xlr_reg_t *) (DEFAULT_XLR_IO_BASE + XLR_IO_GPIO_OFFSET);
1499 mmio_gpio[0x20] = 0x7e6802;
1500 mmio_gpio[0x10] = 0x7104;
1501	for (i = 0; i < 100000000; i++) {} /* delay */
1502
1503 /* enable autoneg - more magic */
1504 phy = sc->phy_addr % 4 + 27;
1505 nlge_mii_write_internal(sc->pcs_addr, phy, 0, 0x1000);
1506 DELAY(100000);
1507 nlge_mii_write_internal(sc->pcs_addr, phy, 0, 0x0200);
1508 DELAY(100000);
1509}
1510
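/*
 * NA interrupt handler.  Only the MDIO interrupt is handled: each port
 * sharing this MDIO bus has its PHY interrupt status register (reg 26,
 * clear-on-read) examined, and the MAC speed/link state is reprogrammed when
 * a link-change bit is set.  The NA interrupt register is cleared
 * unconditionally on the way out.
 */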
1511static void
1512nlge_intr(void *arg)
1513{
1514 struct nlge_port_set *pset;
1515 struct nlge_softc *sc;
1516 struct nlge_softc *port_sc;
1517 xlr_reg_t *base;
1518 uint32_t intreg;
1519 uint32_t intr_status;
1520 int i;
1521
1522 sc = arg;
1523 if (sc == NULL) {
1524 printf("warning: No port registered for interrupt\n");
1525 return;
1526 }
1527 base = sc->base;
1528
1529 intreg = NLGE_READ(base, R_INTREG);
1530 if (intreg & (1 << O_INTREG__MDInt)) {
1531 pset = sc->mdio_pset;
1532 if (pset == NULL) {
1533 printf("warning: No ports for MDIO interrupt\n");
1534 return;
1535 }
1536 for (i = 0; i < pset->vec_sz; i++) {
1537 port_sc = pset->port_vec[i];
1538
1539 if (port_sc == NULL)
1540 continue;
1541
1542 /* Ack phy interrupt - clear on read*/
1543 intr_status = nlge_mii_read_internal(port_sc->mii_base,
1544 port_sc->phy_addr, 26);
1545 PDEBUG("Phy_%d: int_status=0x%08x\n", port_sc->phy_addr,
1546 intr_status);
1547
1548 if (!(intr_status & 0x8000)) {
1549 /* no interrupt for this port */
1550 continue;
1551 }
1552
1553 if (intr_status & 0x2410) {
1554 /* update link status for port */
1555 nlge_gmac_config_speed(port_sc, 0);
1556 } else {
1557 printf("%s: Unsupported phy interrupt"
1558 " (0x%08x)\n",
1559 device_get_nameunit(port_sc->nlge_dev),
1560 intr_status);
1561 }
1562 }
1563 }
1564
1565 /* Clear the NA interrupt */
1566 xlr_write_reg(base, R_INTREG, 0xffffffff);
1567
1568 return;
1569}
1570
1571static int
1572nlge_irq_init(struct nlge_softc *sc)
1573{
1574 struct resource irq_res;
1575 struct nlna_softc *na_sc;
1576 struct xlr_gmac_block_t *block_info;
1577 device_t na_dev;
1578 int ret;
1579 int irq_num;
1580
1581 na_dev = device_get_parent(sc->nlge_dev);
1582 block_info = device_get_ivars(na_dev);
1583
1584 irq_num = block_info->baseirq + sc->instance;
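	/*
	 * These interrupt lines are hard-wired, so there is no bus-managed
	 * resource to activate; a resource structure is faked on the stack
	 * carrying just the IRQ number (presumably all that the platform
	 * interrupt code consumes) and handed to bus_setup_intr().
	 */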
1585 irq_res.__r_i = (struct resource_i *)(intptr_t) (irq_num);
1586 ret = bus_setup_intr(sc->nlge_dev, &irq_res, (INTR_FAST |
1587 INTR_TYPE_NET | INTR_MPSAFE), NULL, nlge_intr, sc, NULL);
1588 if (ret) {
1589 nlge_detach(sc->nlge_dev);
1590 device_printf(sc->nlge_dev, "couldn't set up irq: error=%d\n",
1591 ret);
1592 return (ENXIO);
1593 }
1594 PDEBUG("Setup intr for dev=%s, irq=%d\n",
1595 device_get_nameunit(sc->nlge_dev), irq_num);
1596
1597 if (sc->instance == 0) {
1598 na_sc = device_get_softc(na_dev);
1599 sc->mdio_pset = &na_sc->mdio_set;
1600 }
1601 return (0);
1602}
1603
1604static void
1605nlge_irq_fini(struct nlge_softc *sc)
1606{
1607}
1608
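/*
 * Per-port h/w initialization: program the descriptor packing parameters
 * (MaxEntry = 1, MAX_FRAME_SIZE-byte regular buffers), enable and clear the
 * statistics counters, write R_L2ALLOCCTRL and mask the port interrupts,
 * then set the station MAC address and apply media-specific settings.
 */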
1609static void
1610nlge_hw_init(struct nlge_softc *sc)
1611{
1612 struct xlr_gmac_port *port_info;
1613 xlr_reg_t *base;
1614
1615 base = sc->base;
1616 port_info = device_get_ivars(sc->nlge_dev);
1617 sc->tx_bucket_id = port_info->tx_bucket_id;
1618
1619 /* each packet buffer is 1536 bytes */
1620 NLGE_WRITE(base, R_DESC_PACK_CTRL,
1621 (1 << O_DESC_PACK_CTRL__MaxEntry) |
1622 (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));
1623 NLGE_WRITE(base, R_STATCTRL, ((1 << O_STATCTRL__Sten) |
1624 (1 << O_STATCTRL__ClrCnt)));
1625 NLGE_WRITE(base, R_L2ALLOCCTRL, 0xffffffff);
1626 NLGE_WRITE(base, R_INTMASK, 0);
1627 nlge_set_mac_addr(sc);
1628 nlge_media_specific_init(sc);
1629}
1630
1631static void
1632nlge_sc_init(struct nlge_softc *sc, device_t dev,
1633 struct xlr_gmac_port *port_info)
1634{
1635 memset(sc, 0, sizeof(*sc));
1636 sc->nlge_dev = dev;
1637 sc->id = device_get_unit(dev);
1638 nlge_set_port_attribs(sc, port_info);
1639}
1640
1641static void
1642nlge_media_specific_init(struct nlge_softc *sc)
1643{
1644 struct mii_data *media;
1645 struct bucket_size *bucket_sizes;
1646
1647 bucket_sizes = xlr_board_info.bucket_sizes;
1648 switch (sc->port_type) {
1649 case XLR_RGMII:
1650 case XLR_SGMII:
1651 case XLR_XAUI:
1652 NLGE_UPDATE(sc->base, R_DESC_PACK_CTRL,
1653 (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset),
1654 (W_DESC_PACK_CTRL__ByteOffset <<
1655 O_DESC_PACK_CTRL__ByteOffset));
1656 NLGE_WRITE(sc->base, R_GMAC_TX0_BUCKET_SIZE + sc->instance,
1657 bucket_sizes->bucket[sc->tx_bucket_id]);
1658 if (sc->port_type != XLR_XAUI) {
1659 nlge_gmac_config_speed(sc, 1);
1660 if (sc->mii_bus) {
1661 media = (struct mii_data *)device_get_softc(
1662 sc->mii_bus);
1663 }
1664 }
1665 break;
1666
1667 case XLR_XGMII:
1668 NLGE_WRITE(sc->base, R_BYTEOFFSET0, 0x2);
1669 NLGE_WRITE(sc->base, R_XGMACPADCALIBRATION, 0x30);
1670 NLGE_WRITE(sc->base, R_XGS_TX0_BUCKET_SIZE,
1671 bucket_sizes->bucket[sc->tx_bucket_id]);
1672 break;
1673 default:
1674 break;
1675 }
1676}
1677
1678/*
1679 * Read the MAC address from the XLR boot registers. All port addresses
1680 * are identical except for the lowest octet.
1681 */
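/*
 * The 48-bit address sits in the low six bytes of xlr_boot1_info.mac_addr;
 * the loop peels it off one octet at a time, most significant first (shifts
 * of 40, 32, ..., 0), and the port id is then added to the last octet.
 */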
1682static void
1683nlge_read_mac_addr(struct nlge_softc *sc)
1684{
1685 int i, j;
1686
1687	for (i = 0, j = 40; i < ETHER_ADDR_LEN && j >= 0; i++, j -= 8)
1688 sc->dev_addr[i] = (xlr_boot1_info.mac_addr >> j) & 0xff;
1689
1690 sc->dev_addr[i - 1] += sc->id; /* last octet is port-specific */
1691}
1692
1693/*
1694 * Write the MAC address to the XLR MAC port. Also, set the address
1695 * masks and MAC filter configuration.
1696 */
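/*
 * The station address is split across the R_MAC_ADDR0 register pair: octets
 * 5..2 in the first word and octets 1..0 in the upper half of the second.
 * The address-mask registers are written with all ones (presumably so every
 * address bit participates in the compare), and the filter accepts
 * broadcast, all multicast, and unicast frames matching MAC_ADDR0.
 */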
1697static void
1698nlge_set_mac_addr(struct nlge_softc *sc)
1699{
1700 NLGE_WRITE(sc->base, R_MAC_ADDR0,
1701 ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16) |
1702 (sc->dev_addr[3] << 8) | (sc->dev_addr[2])));
1703 NLGE_WRITE(sc->base, R_MAC_ADDR0 + 1,
1704	    ((sc->dev_addr[1] << 24) | (sc->dev_addr[0] << 16)));
1705
1706 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK2, 0xffffffff);
1707 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
1708 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK3, 0xffffffff);
1709 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
1710
1711 NLGE_WRITE(sc->base, R_MAC_FILTER_CONFIG,
1712 (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1713 (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1714 (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));
1715
1716 if (sc->port_type == XLR_RGMII || sc->port_type == XLR_SGMII) {
1717 NLGE_UPDATE(sc->base, R_IPG_IFG, MAC_B2B_IPG, 0x7f);
1718 }
1719}
1720
1721static int
1722nlge_if_init(struct nlge_softc *sc)
1723{
1724 struct ifnet *ifp;
1725 device_t dev;
1726 int error;
1727
1728 error = 0;
1729 dev = sc->nlge_dev;
1730 NLGE_LOCK_INIT(sc, device_get_nameunit(dev));
1731
1732 ifp = sc->nlge_if = if_alloc(IFT_ETHER);
1733 if (ifp == NULL) {
1734 device_printf(dev, "can not if_alloc()\n");
1735 error = ENOSPC;
1736 goto fail;
1737 }
1738 ifp->if_softc = sc;
1739 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1740 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1741 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;
1742 ifp->if_capenable = ifp->if_capabilities;
1743 ifp->if_ioctl = nlge_ioctl;
1744 ifp->if_start = nlge_start;
1745 ifp->if_init = nlge_init;
1746 ifp->if_hwassist = 0;
1747 ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1748 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1749 IFQ_SET_READY(&ifp->if_snd);
1750
1751 ifmedia_init(&sc->nlge_mii.mii_media, 0, nlge_mediachange,
1752 nlge_mediastatus);
1753 ifmedia_add(&sc->nlge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1754 ifmedia_set(&sc->nlge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1755 sc->nlge_mii.mii_media.ifm_media = sc->nlge_mii.mii_media.ifm_cur->ifm_media;
1756 nlge_read_mac_addr(sc);
1757
1758 ether_ifattach(ifp, sc->dev_addr);
1759
1760fail:
1761 return (error);
1762}
1763
1764static void
1765nlge_mii_init(device_t dev, struct nlge_softc *sc)
1766{
1767 int error;
1768
1769 if (sc->port_type != XLR_XAUI && sc->port_type != XLR_XGMII) {
1770 NLGE_WRITE(sc->mii_base, R_MII_MGMT_CONFIG, 0x07);
1771 }
1772 error = mii_phy_probe(dev, &sc->mii_bus, nlge_mediachange, nlge_mediastatus);
1773 if (error) {
1774 device_printf(dev, "no PHY device found\n");
1775 sc->mii_bus = NULL;
1776 }
1777 if (sc->mii_bus != NULL) {
1778 /*
1779 * Enable all MDIO interrupts in the phy. RX_ER bit seems to get
1780 * set about every 1 sec in GigE mode, ignore it for now...
1781 */
1782 nlge_mii_write_internal(sc->mii_base, sc->phy_addr, 25,
1783 0xfffffffe);
1784 }
1785}
1786
1787/*
1788 * Read a PHY register.
1789 *
1790 * Input parameters:
1791 * mii_base - Base address of MII
1792 * phyaddr - PHY's address
1793 *    regidx - index of register to read
1794 *
1795 * Return value:
1796 *    value read, or 0xffffffff if the read timed out.
1797 */
1798
1799static int
1800nlge_mii_read_internal(xlr_reg_t *mii_base, int phyaddr, int regidx)
1801{
1802 int i, val;
1803
1804 /* setup the phy reg to be used */
1805 NLGE_WRITE(mii_base, R_MII_MGMT_ADDRESS,
1806 (phyaddr << 8) | (regidx << 0));
1807 /* Issue the read command */
1808 NLGE_WRITE(mii_base, R_MII_MGMT_COMMAND,
1809 (1 << O_MII_MGMT_COMMAND__rstat));
1810
1811 /* poll for the read cycle to complete */
1812 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
1813 if (NLGE_READ(mii_base, R_MII_MGMT_INDICATORS) == 0)
1814 break;
1815 }
1816
1817 /* clear the read cycle */
1818 NLGE_WRITE(mii_base, R_MII_MGMT_COMMAND, 0);
1819
1820 if (i == PHY_STATUS_RETRIES) {
1821 return (0xffffffff);
1822 }
1823
1824 val = NLGE_READ(mii_base, R_MII_MGMT_STATUS);
1825
1826 return (val);
1827}
1828
1829/*
1830 * Write a value to a PHY register.
1831 *
1832 * Input parameters:
1833 * mii_base - Base address of MII
1834 * phyaddr - PHY to use
1835 * regidx - register within the PHY
1836 * regval - data to write to register
1837 *
1838 * Return value:
1839 * nothing
1840 */
1841static void
1842nlge_mii_write_internal(xlr_reg_t *mii_base, int phyaddr, int regidx,
1843 int regval)
1844{
1845 int i;
1846
1847 NLGE_WRITE(mii_base, R_MII_MGMT_ADDRESS,
1848 (phyaddr << 8) | (regidx << 0));
1849
1850 /* Write the data which starts the write cycle */
1851 NLGE_WRITE(mii_base, R_MII_MGMT_WRITE_DATA, regval);
1852
1853 /* poll for the write cycle to complete */
1854 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
1855 if (NLGE_READ(mii_base, R_MII_MGMT_INDICATORS) == 0)
1856 break;
1857 }
1858}
1859
1860/*
1861 * Function to optimize the use of p2d descriptors for the given PDU.
1862 * As it is on the fast path (called during packet transmission), it is
1863 * described in more detail than the initialization functions.
1864 *
1865 * Input: mbuf chain (MC), pointer to fmn message
1866 * Input constraints: None
1867 * Output: FMN message to transmit the data in MC
1868 * Return values: 0 - success
1869 * 1 - MC cannot be handled (see Limitations below)
1870 * 2 - MC cannot be handled presently (maybe worth re-trying)
1871 * Other output: Number of entries filled in the FMN message
1872 *
1873 * Output structure/constraints:
1874 * 1. Max 3 p2d's + 1 zero-len (ZL) p2d with virtual address of MC.
1875 * 2. 3 p2d's + 1 p2p with max 14 p2d's (ZL p2d not required in this case).
1876 * 3. Each p2d points to physically contiguous chunk of data (subject to
1877 * entire MC requiring max 17 p2d's).
1878 * Limitations:
1879 * 1. MC's that require more than 17 p2d's are not handled.
1880 * Benefits: MC's that require <= 3 p2d's avoid the overhead of allocating
1881 * the p2p structure. Small packets (which typically give low
1882 * performance) are expected to have a small MC that takes
1883 * advantage of this.
1884 */
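/*
 * Example (illustrative fragment sizes): a chain of two fragments, each
 * within one page, consumes two data p2d's plus the zero-length p2d that
 * carries the mbuf chain pointer, so *n_entries is 3 and no p2p block is
 * allocated.  A chain needing four or more data p2d's switches to the p2p
 * layout: the first three message words hold p2d's, the fourth holds the
 * p2p pointer descriptor, and *n_entries is always FMN_SZ (4) in that case.
 */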
1885static int
1886prepare_fmn_message(struct nlge_softc *sc, struct msgrng_msg *fmn_msg,
1887 uint32_t *n_entries, struct mbuf *mbuf_chain, uint64_t fb_stn_id,
1888 struct nlge_tx_desc **tx_desc)
1889{
1890 struct mbuf *m;
1891 struct nlge_tx_desc *p2p;
1892 uint64_t *cur_p2d;
1893 vm_offset_t buf;
1894 vm_paddr_t paddr;
1895 int msg_sz, p2p_sz, is_p2p;
1896 int len, frag_sz;
1897 /* Num entries per FMN msg is 4 for XLR/XLS */
1898 const int FMN_SZ = sizeof(*fmn_msg) / sizeof(uint64_t);
1899
1900 msg_sz = p2p_sz = is_p2p = 0;
1901 p2p = NULL;
1902 cur_p2d = &fmn_msg->msg0;
1903
1904 for (m = mbuf_chain; m != NULL; m = m->m_next) {
1905 buf = (vm_offset_t) m->m_data;
1906 len = m->m_len;
1907
1908 while (len) {
1909 if (msg_sz == (FMN_SZ - 1)) {
1910 p2p = uma_zalloc(nl_tx_desc_zone, M_WAITOK);
1911 if (p2p == NULL)
1912 return 2;
1913 /*
1914 * As we currently use xlr_paddr_lw on a 32-bit
1915 * OS, both the pointers are laid out in one
1916 * 64-bit location - this makes it easy to
1917 * retrieve the pointers when processing the
1918 * tx free-back descriptor.
1919 */
1920 p2p->frag[XLR_MAX_TX_FRAGS] =
1921 (((uint64_t) (vm_offset_t) p2p) << 32) |
1922 ((vm_offset_t) mbuf_chain);
1923 cur_p2d = &p2p->frag[0];
1924 is_p2p = 1;
1925 } else if (msg_sz == (FMN_SZ - 1 + XLR_MAX_TX_FRAGS)) {
1926 uma_zfree(nl_tx_desc_zone, p2p);
1927 return 1;
1928 }
1929 paddr = vtophys(buf);
1930 frag_sz = PAGE_SIZE - (buf & PAGE_MASK);
1931 if (len < frag_sz)
1932 frag_sz = len;
1933 *cur_p2d++ = (127ULL << 54) | ((uint64_t)frag_sz << 40)
1934 | paddr;
1935 msg_sz++;
1936 if (is_p2p)
1937 p2p_sz++;
1938 len -= frag_sz;
1939 buf += frag_sz;
1940 }
1941 }
1942
1943 if (msg_sz > 0) {
1944 cur_p2d[-1] |= (1ULL << 63); /* set eop in most-recent p2d */
1945 } else {
1946 printf("Zero-length mbuf chain ??\n");
1947		*n_entries = msg_sz;
1948 return 0;
1949 }
1950
1951 *tx_desc = p2p;
1952
1953 if (is_p2p) {
1954 paddr = vtophys(p2p);
1955 fmn_msg->msg3 = (1ULL << 63) | (1ULL << 62) |
1956 ((uint64_t)fb_stn_id << 54) |
1957 ((uint64_t)(p2p_sz * 8) << 40) | paddr;
1958 *n_entries = FMN_SZ;
1959 } else {
1960 /* zero-len p2d */
1961 *cur_p2d = (1ULL << 63) | ((uint64_t)fb_stn_id << 54) |
1962 (vm_offset_t) mbuf_chain;
1963 *n_entries = msg_sz + 1;
1964 }
1965
1966 return (0);
1967}
1968
1969static int
1970send_fmn_msg_tx(struct nlge_softc *sc, struct msgrng_msg *msg,
1971 uint32_t n_entries)
1972{
1973 unsigned long mflags;
1974 int ret;
1975
1976 mflags = 0;
1977 msgrng_access_enable(mflags);
1978 ret = message_send_retry(n_entries, MSGRNG_CODE_MAC, sc->tx_bucket_id,
1979 msg);
1980 msgrng_access_disable(mflags);
1981 return (!ret);
1982}
1983
1984static void
1985release_mbuf(uint64_t phy_addr)
1986{
1987 struct mbuf *m;
1988
1989 m = (struct mbuf *)((uint32_t) phy_addr);
1990 m_freem(m);
1991}
1992
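/*
 * Reclaim a transmitted packet that used the p2p layout.  The physical
 * address in the free-back message points at the p2p block; its last slot
 * (filled in by prepare_fmn_message()) packs the virtual addresses of the
 * p2p block and of the originating mbuf chain into one 64-bit word, which
 * is read back here with KX access enabled.
 */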
1993static void
1994release_tx_desc(struct msgrng_msg *msg)
1995{
1996 vm_paddr_t paddr;
1997 uint64_t temp;
1998 struct nlge_tx_desc *tx_desc;
1999 struct mbuf *m;
2000 uint32_t sr;
2001
2002 paddr = msg->msg0 & 0xffffffffffULL;
2003 paddr += (XLR_MAX_TX_FRAGS * sizeof(uint64_t));
2004 sr = xlr_enable_kx();
2005 temp = xlr_paddr_lw(paddr);
2006 tx_desc = (struct nlge_tx_desc*)((intptr_t) temp);
2007 paddr += sizeof(void *);
2008 temp = xlr_paddr_lw(paddr);
2009 mips_wr_status(sr);
2010 m = (struct mbuf *)((intptr_t) temp);
2011 m_freem(m);
2012
2013 uma_zfree(nl_tx_desc_zone, tx_desc);
2014}
2015
2016static struct mbuf *
2017get_mbuf(void)
2018{
2019 struct mbuf *m_new;
2020
2021 if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
2022 return NULL;
2023 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
2024 return (m_new);
2025}
2026
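/*
 * Allocate a cluster mbuf and hand back a cache-line aligned pointer into
 * its data area.  The mbuf back-pointer is stashed in the cache line just
 * before the returned buffer so it can be recovered when the hardware
 * returns the buffer, and the 1536-byte span is checked to be physically
 * contiguous.
 */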
2027static void *
2028get_buf(void)
2029{
2030 struct mbuf *m_new;
2031 vm_paddr_t temp1, temp2;
2032 unsigned int *md;
2033
2034 m_new = get_mbuf();
2035 if (m_new == NULL)
2036 return m_new;
2037
2038 m_adj(m_new, XLR_CACHELINE_SIZE - ((unsigned int)m_new->m_data & 0x1f));
2039 md = (unsigned int *)m_new->m_data;
2040 md[0] = (unsigned int)m_new; /* Back Ptr */
2041 md[1] = 0xf00bad;
2042 m_adj(m_new, XLR_CACHELINE_SIZE);
2043
2044 temp1 = vtophys((vm_offset_t) m_new->m_data);
2045 temp2 = vtophys((vm_offset_t) m_new->m_data + 1536);
2046 if ((temp1 + 1536) != temp2)
2047 panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
2048
2049 return ((void *)m_new->m_data);
2050}
2051
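/*
 * Poll the PHY for autonegotiation/link status and program the MAC
 * (R_INTERFACE_CONTROL, R_MAC_CONFIG_2, R_CORECONTROL) to match the
 * negotiated speed.  The speed code comes from bits [4:3] of PHY register
 * 28: 10/100/1000 Mbps, with the fourth encoding treated as unknown and
 * defaulted to 100 Mbps.  With 'quick' set, the link poll is bounded at 100
 * iterations instead of 4000.
 */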
2052static int
2053nlge_gmac_config_speed(struct nlge_softc *sc, int quick)
2054{
2055 struct mii_data *md;
2056 xlr_reg_t *mmio;
2057 int bmsr, n_tries, max_tries;
2058 int core_ctl[] = { 0x2, 0x1, 0x0, 0x1 };
2059 int sgmii_speed[] = { SGMII_SPEED_10,
2060 SGMII_SPEED_100,
2061 SGMII_SPEED_1000,
2062 SGMII_SPEED_100 }; /* default to 100Mbps */
2063 char *speed_str[] = { "10",
2064 "100",
2065 "1000",
2066 "unknown, defaulting to 100" };
2067 int link_state = LINK_STATE_DOWN;
2068
2069 if (sc->port_type == XLR_XAUI || sc->port_type == XLR_XGMII)
2070 return 0;
2071
2072 md = NULL;
2073 mmio = sc->base;
2074 if (sc->mii_base != NULL) {
2075 max_tries = (quick == 1) ? 100 : 4000;
2076 bmsr = 0;
2077 for (n_tries = 0; n_tries < max_tries; n_tries++) {
2078 bmsr = nlge_mii_read_internal(sc->mii_base,
2079 sc->phy_addr, MII_BMSR);
2080			/* Stop once autonegotiation completes and link is up */
2081			if ((bmsr & BMSR_ACOMP) && (bmsr & BMSR_LINK))
2082				break;
2083 DELAY(1000);
2084 }
2085 bmsr &= BMSR_LINK;
2086 sc->link = (bmsr == 0) ? xlr_mac_link_down : xlr_mac_link_up;
2087 sc->speed = nlge_mii_read_internal(sc->mii_base, sc->phy_addr, 28);
2088 sc->speed = (sc->speed >> 3) & 0x03;
2089 if (sc->link == xlr_mac_link_up) {
2090 link_state = LINK_STATE_UP;
2091 nlge_sgmii_init(sc);
2092 }
2093 if (sc->mii_bus)
2094 md = (struct mii_data *)device_get_softc(sc->mii_bus);
2095 }
2096
2097 if (sc->port_type != XLR_RGMII)
2098 NLGE_WRITE(mmio, R_INTERFACE_CONTROL, sgmii_speed[sc->speed]);
2099 if (sc->speed == xlr_mac_speed_10 || sc->speed == xlr_mac_speed_100 ||
2100 sc->speed == xlr_mac_speed_rsvd) {
2101 NLGE_WRITE(mmio, R_MAC_CONFIG_2, 0x7117);
2102 } else if (sc->speed == xlr_mac_speed_1000) {
2103 NLGE_WRITE(mmio, R_MAC_CONFIG_2, 0x7217);
2104 if (md != NULL) {
2105 ifmedia_set(&md->mii_media, IFM_MAKEWORD(IFM_ETHER,
2106 IFM_1000_T, IFM_FDX, md->mii_instance));
2107 }
2108 }
2109 NLGE_WRITE(mmio, R_CORECONTROL, core_ctl[sc->speed]);
2110 if_link_state_change(sc->nlge_if, link_state);
2111 printf("%s: [%sMbps]\n", device_get_nameunit(sc->nlge_dev),
2112 speed_str[sc->speed]);
2113
2114 return (0);
2115}
2116
2117/*
2118 * This function is called for each port that was added to the device tree
2119 * and it initializes the following port attributes:
2120 * 	- instance and type
2121 * 	- base (base address to access port-specific registers)
2122 * 	- mii_base, and pcs_addr/serdes_addr when present
2123 * - phy_addr
2124 */
2125static void
2126nlge_set_port_attribs(struct nlge_softc *sc,
2127 struct xlr_gmac_port *port_info)
2128{
2129 sc->instance = port_info->instance % 4; /* TBD: will not work for SPI-4 */
2130 sc->port_type = port_info->type;
2131 sc->base = (xlr_reg_t *) (port_info->base_addr +
2132 (uint32_t)DEFAULT_XLR_IO_BASE);
2133 sc->mii_base = (xlr_reg_t *) (port_info->mii_addr +
2134 (uint32_t)DEFAULT_XLR_IO_BASE);
2135 if (port_info->pcs_addr != 0)
2136 sc->pcs_addr = (xlr_reg_t *) (port_info->pcs_addr +
2137 (uint32_t)DEFAULT_XLR_IO_BASE);
2138 if (port_info->serdes_addr != 0)
2139 sc->serdes_addr = (xlr_reg_t *) (port_info->serdes_addr +
2140 (uint32_t)DEFAULT_XLR_IO_BASE);
2141 sc->phy_addr = port_info->phy_addr;
2142
2143 PDEBUG("Port%d: base=%p, mii_base=%p, phy_addr=%d\n", sc->id, sc->base,
2144 sc->mii_base, sc->phy_addr);
2145}
2146
2147/* ------------------------------------------------------------------------ */
2148
2149/* Debug dump functions */
2150
2151#ifdef DEBUG
2152
2153static void
2154dump_reg(xlr_reg_t *base, uint32_t offset, char *name)
2155{
2156 int val;
2157
2158 val = NLGE_READ(base, offset);
2159 printf("%-30s: 0x%8x 0x%8x\n", name, offset, val);
2160}
2161
2162#define STRINGIFY(x) #x
2163
2164static void
2165dump_na_registers(xlr_reg_t *base_addr, int port_id)
2166{
2167 PDEBUG("Register dump for NA (of port=%d)\n", port_id);
2168 dump_reg(base_addr, R_PARSERCONFIGREG, STRINGIFY(R_PARSERCONFIGREG));
2169 PDEBUG("Tx bucket sizes\n");
2170 dump_reg(base_addr, R_GMAC_JFR0_BUCKET_SIZE,
2171 STRINGIFY(R_GMAC_JFR0_BUCKET_SIZE));
2172 dump_reg(base_addr, R_GMAC_RFR0_BUCKET_SIZE,
2173 STRINGIFY(R_GMAC_RFR0_BUCKET_SIZE));
2174 dump_reg(base_addr, R_GMAC_TX0_BUCKET_SIZE,
2175 STRINGIFY(R_GMAC_TX0_BUCKET_SIZE));
2176 dump_reg(base_addr, R_GMAC_TX1_BUCKET_SIZE,
2177 STRINGIFY(R_GMAC_TX1_BUCKET_SIZE));
2178 dump_reg(base_addr, R_GMAC_TX2_BUCKET_SIZE,
2179 STRINGIFY(R_GMAC_TX2_BUCKET_SIZE));
2180 dump_reg(base_addr, R_GMAC_TX3_BUCKET_SIZE,
2181 STRINGIFY(R_GMAC_TX3_BUCKET_SIZE));
2182 dump_reg(base_addr, R_GMAC_JFR1_BUCKET_SIZE,
2183 STRINGIFY(R_GMAC_JFR1_BUCKET_SIZE));
2184 dump_reg(base_addr, R_GMAC_RFR1_BUCKET_SIZE,
2185 STRINGIFY(R_GMAC_RFR1_BUCKET_SIZE));
2186 dump_reg(base_addr, R_TXDATAFIFO0, STRINGIFY(R_TXDATAFIFO0));
2187 dump_reg(base_addr, R_TXDATAFIFO1, STRINGIFY(R_TXDATAFIFO1));
2188}
2189
2190static void
2191dump_gmac_registers(struct nlge_softc *sc)
2192{
2193 xlr_reg_t *base_addr = sc->base;
2194 int port_id = sc->instance;
2195
2196 PDEBUG("Register dump for port=%d\n", port_id);
2197 if (sc->port_type == XLR_RGMII || sc->port_type == XLR_SGMII) {
2198 dump_reg(base_addr, R_MAC_CONFIG_1, STRINGIFY(R_MAC_CONFIG_1));
2199 dump_reg(base_addr, R_MAC_CONFIG_2, STRINGIFY(R_MAC_CONFIG_2));
2200 dump_reg(base_addr, R_IPG_IFG, STRINGIFY(R_IPG_IFG));
2201 dump_reg(base_addr, R_HALF_DUPLEX, STRINGIFY(R_HALF_DUPLEX));
2202 dump_reg(base_addr, R_MAXIMUM_FRAME_LENGTH,
2203 STRINGIFY(R_MAXIMUM_FRAME_LENGTH));
2204 dump_reg(base_addr, R_TEST, STRINGIFY(R_TEST));
2205 dump_reg(base_addr, R_MII_MGMT_CONFIG,
2206 STRINGIFY(R_MII_MGMT_CONFIG));
2207 dump_reg(base_addr, R_MII_MGMT_COMMAND,
2208 STRINGIFY(R_MII_MGMT_COMMAND));
2209 dump_reg(base_addr, R_MII_MGMT_ADDRESS,
2210 STRINGIFY(R_MII_MGMT_ADDRESS));
2211 dump_reg(base_addr, R_MII_MGMT_WRITE_DATA,
2212 STRINGIFY(R_MII_MGMT_WRITE_DATA));
2213 dump_reg(base_addr, R_MII_MGMT_STATUS,
2214 STRINGIFY(R_MII_MGMT_STATUS));
2215 dump_reg(base_addr, R_MII_MGMT_INDICATORS,
2216 STRINGIFY(R_MII_MGMT_INDICATORS));
2217 dump_reg(base_addr, R_INTERFACE_CONTROL,
2218 STRINGIFY(R_INTERFACE_CONTROL));
2219 dump_reg(base_addr, R_INTERFACE_STATUS,
2220 STRINGIFY(R_INTERFACE_STATUS));
2221 } else if (sc->port_type == XLR_XAUI || sc->port_type == XLR_XGMII) {
2222 dump_reg(base_addr, R_XGMAC_CONFIG_0,
2223 STRINGIFY(R_XGMAC_CONFIG_0));
2224 dump_reg(base_addr, R_XGMAC_CONFIG_1,
2225 STRINGIFY(R_XGMAC_CONFIG_1));
2226 dump_reg(base_addr, R_XGMAC_CONFIG_2,
2227 STRINGIFY(R_XGMAC_CONFIG_2));
2228 dump_reg(base_addr, R_XGMAC_CONFIG_3,
2229 STRINGIFY(R_XGMAC_CONFIG_3));
2230 dump_reg(base_addr, R_XGMAC_STATION_ADDRESS_LS,
2231 STRINGIFY(R_XGMAC_STATION_ADDRESS_LS));
2232 dump_reg(base_addr, R_XGMAC_STATION_ADDRESS_MS,
2233 STRINGIFY(R_XGMAC_STATION_ADDRESS_MS));
2234 dump_reg(base_addr, R_XGMAC_MAX_FRAME_LEN,
2235 STRINGIFY(R_XGMAC_MAX_FRAME_LEN));
2236 dump_reg(base_addr, R_XGMAC_REV_LEVEL,
2237 STRINGIFY(R_XGMAC_REV_LEVEL));
2238 dump_reg(base_addr, R_XGMAC_MIIM_COMMAND,
2239 STRINGIFY(R_XGMAC_MIIM_COMMAND));
2240 dump_reg(base_addr, R_XGMAC_MIIM_FILED,
2241 STRINGIFY(R_XGMAC_MIIM_FILED));
2242 dump_reg(base_addr, R_XGMAC_MIIM_CONFIG,
2243 STRINGIFY(R_XGMAC_MIIM_CONFIG));
2244 dump_reg(base_addr, R_XGMAC_MIIM_LINK_FAIL_VECTOR,
2245 STRINGIFY(R_XGMAC_MIIM_LINK_FAIL_VECTOR));
2246 dump_reg(base_addr, R_XGMAC_MIIM_INDICATOR,
2247 STRINGIFY(R_XGMAC_MIIM_INDICATOR));
2248 }
2249
2250 dump_reg(base_addr, R_MAC_ADDR0, STRINGIFY(R_MAC_ADDR0));
2251 dump_reg(base_addr, R_MAC_ADDR0 + 1, STRINGIFY(R_MAC_ADDR0+1));
2252 dump_reg(base_addr, R_MAC_ADDR1, STRINGIFY(R_MAC_ADDR1));
2253 dump_reg(base_addr, R_MAC_ADDR2, STRINGIFY(R_MAC_ADDR2));
2254 dump_reg(base_addr, R_MAC_ADDR3, STRINGIFY(R_MAC_ADDR3));
2255 dump_reg(base_addr, R_MAC_ADDR_MASK2, STRINGIFY(R_MAC_ADDR_MASK2));
2256 dump_reg(base_addr, R_MAC_ADDR_MASK3, STRINGIFY(R_MAC_ADDR_MASK3));
2257 dump_reg(base_addr, R_MAC_FILTER_CONFIG, STRINGIFY(R_MAC_FILTER_CONFIG));
2258 dump_reg(base_addr, R_TX_CONTROL, STRINGIFY(R_TX_CONTROL));
2259 dump_reg(base_addr, R_RX_CONTROL, STRINGIFY(R_RX_CONTROL));
2260 dump_reg(base_addr, R_DESC_PACK_CTRL, STRINGIFY(R_DESC_PACK_CTRL));
2261 dump_reg(base_addr, R_STATCTRL, STRINGIFY(R_STATCTRL));
2262 dump_reg(base_addr, R_L2ALLOCCTRL, STRINGIFY(R_L2ALLOCCTRL));
2263 dump_reg(base_addr, R_INTMASK, STRINGIFY(R_INTMASK));
2264 dump_reg(base_addr, R_INTREG, STRINGIFY(R_INTREG));
2265 dump_reg(base_addr, R_TXRETRY, STRINGIFY(R_TXRETRY));
2266 dump_reg(base_addr, R_CORECONTROL, STRINGIFY(R_CORECONTROL));
2267 dump_reg(base_addr, R_BYTEOFFSET0, STRINGIFY(R_BYTEOFFSET0));
2268 dump_reg(base_addr, R_BYTEOFFSET1, STRINGIFY(R_BYTEOFFSET1));
2269 dump_reg(base_addr, R_L2TYPE_0, STRINGIFY(R_L2TYPE_0));
2270 dump_na_registers(base_addr, port_id);
2271}
2272
2273static void
2274dump_fmn_cpu_credits_for_gmac(struct xlr_board_info *board, int gmac_id)
2275{
2276 struct stn_cc *cc;
2277 int gmac_bucket_ids[] = { 97, 98, 99, 100, 101, 103 };
2278 int j, k, r, c;
2279 int n_gmac_buckets;
2280
2281 n_gmac_buckets = sizeof (gmac_bucket_ids) / sizeof (gmac_bucket_ids[0]);
2282	for (j = 0; j < 8; j++) {	/* for each cpu */
2283 cc = board->credit_configs[j];
2284 printf("Credits for Station CPU_%d ---> GMAC buckets (tx path)\n", j);
2285 for (k = 0; k < n_gmac_buckets; k++) {
2286 r = gmac_bucket_ids[k] / 8;
2287 c = gmac_bucket_ids[k] % 8;
2288 printf (" --> gmac%d_bucket_%-3d: credits=%d\n", gmac_id,
2289 gmac_bucket_ids[k], cc->counters[r][c]);
2290 }
2291 }
2292}
2293
2294static void
2295dump_fmn_gmac_credits(struct xlr_board_info *board, int gmac_id)
2296{
2297 struct stn_cc *cc;
2298 int j, k;
2299
2300 cc = board->gmac_block[gmac_id].credit_config;
2301 printf("Credits for Station: GMAC_%d ---> CPU buckets (rx path)\n", gmac_id);
2302	for (j = 0; j < 8; j++) {	/* for each cpu */
2303 printf(" ---> cpu_%d\n", j);
2304		for (k = 0; k < 8; k++) {	/* for each bucket in cpu */
2305 printf(" ---> bucket_%d: credits=%d\n", j * 8 + k,
2306 cc->counters[j][k]);
2307 }
2308 }
2309}
2310
2311static void
2312dump_board_info(struct xlr_board_info *board)
2313{
2314 struct xlr_gmac_block_t *gm;
2315 int i, k;
2316
2317 printf("cpu=%x ", xlr_revision());
2318 printf("board_version: major=%llx, minor=%llx\n",
2319 xlr_boot1_info.board_major_version,
2320 xlr_boot1_info.board_minor_version);
2321 printf("is_xls=%d, nr_cpus=%d, usb=%s, cfi=%s, ata=%s\npci_irq=%d,"
2322 "gmac_ports=%d\n", board->is_xls, board->nr_cpus,
2323 board->usb ? "Yes" : "No", board->cfi ? "Yes": "No",
2324 board->ata ? "Yes" : "No", board->pci_irq, board->gmacports);
2325 printf("FMN: Core-station bucket sizes\n");
2326 for (i = 0; i < 128; i++) {
2327 if (i && ((i % 16) == 0))
2328 printf("\n");
2329 printf ("b[%d] = %d ", i, board->bucket_sizes->bucket[i]);
2330 }
2331 printf("\n");
2332 for (i = 0; i < 3; i++) {
2333 gm = &board->gmac_block[i];
2334 printf("RNA_%d: type=%d, enabled=%s, mode=%d, station_id=%d,"
2335 "station_txbase=%d, station_rfr=%d ", i, gm->type,
2336 gm->enabled ? "Yes" : "No", gm->mode, gm->station_id,
2337 gm->station_txbase, gm->station_rfr);
2338 printf("n_ports=%d, baseaddr=%p, baseirq=%d, baseinst=%d\n",
2339 gm->num_ports, (xlr_reg_t *)gm->baseaddr, gm->baseirq,
2340 gm->baseinst);
2341 }
2342	for (k = 0; k < 3; k++) {	/* for each NA */
2343 dump_fmn_cpu_credits_for_gmac(board, k);
2344 dump_fmn_gmac_credits(board, k);
2345 }
2346}
2347
2348static void
2349dump_mac_stats(struct nlge_softc *sc)
2350{
2351 xlr_reg_t *addr;
2352 uint32_t pkts_tx, pkts_rx;
2353
2354 addr = sc->base;
2355 pkts_rx = NLGE_READ(sc->base, R_RPKT);
2356 pkts_tx = NLGE_READ(sc->base, R_TPKT);
2357
2358 printf("[nlge_%d mac stats]: pkts_tx=%u, pkts_rx=%u\n", sc->id, pkts_tx,
2359 pkts_rx);
2360 if (pkts_rx > 0) {
2361 uint32_t r;
2362
2363 /* dump all rx counters. we need this because pkts_rx includes
2364 bad packets. */
2365 for (r = R_RFCS; r <= R_ROVR; r++)
2366 printf("[nlge_%d mac stats]: [0x%x]=%u\n", sc->id, r,
2367 NLGE_READ(sc->base, r));
2368 }
2369 if (pkts_tx > 0) {
2370 uint32_t r;
2371
2372 /* dump all tx counters. might be useful for debugging. */
2373 for (r = R_TMCA; r <= R_TFRG; r++) {
2374 if ((r == (R_TNCL + 1)) || (r == (R_TNCL + 2)))
2375 continue;
2376 printf("[nlge_%d mac stats]: [0x%x]=%u\n", sc->id, r,
2377 NLGE_READ(sc->base, r));
2378 }
2379 }
2380
2381}
2382
2383static void
2384dump_mii_regs(struct nlge_softc *sc)
2385{
2386 uint32_t mii_regs[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
2387 0x8, 0x9, 0xa, 0xf, 0x10, 0x11, 0x12, 0x13,
2388 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
2389 0x1c, 0x1d, 0x1e};
2390 int i, n_regs;
2391
2392 if (sc->mii_base == NULL || sc->mii_bus == NULL)
2393 return;
2394
2395 n_regs = sizeof (mii_regs) / sizeof (mii_regs[0]);
2396 for (i = 0; i < n_regs; i++) {
2397 printf("[mii_0x%x] = %x\n", mii_regs[i],
2398 nlge_mii_read_internal(sc->mii_base, sc->phy_addr,
2399 mii_regs[i]));
2400 }
2401}
2402
2403static void
2404dump_ifmedia(struct ifmedia *ifm)
2405{
2406 printf("ifm_mask=%08x, ifm_media=%08x, cur=%p\n", ifm->ifm_mask,
2407 ifm->ifm_media, ifm->ifm_cur);
2408 if (ifm->ifm_cur != NULL) {
2409 printf("Cur attribs: ifmedia_entry.ifm_media=%08x,"
2410 " ifmedia_entry.ifm_data=%08x\n", ifm->ifm_cur->ifm_media,
2411 ifm->ifm_cur->ifm_data);
2412 }
2413}
2414
2415static void
2416dump_mii_data(struct mii_data *mii)
2417{
2418 dump_ifmedia(&mii->mii_media);
2419 printf("ifp=%p, mii_instance=%d, mii_media_status=%08x,"
2420 " mii_media_active=%08x\n", mii->mii_ifp, mii->mii_instance,
2421 mii->mii_media_status, mii->mii_media_active);
2422}
2423
2424static void
2425dump_pcs_regs(struct nlge_softc *sc, int phy)
2426{
2427 int i, val;
2428
2429 printf("PCS regs from %p for phy=%d\n", sc->pcs_addr, phy);
2430 for (i = 0; i < 18; i++) {
2431 if (i == 2 || i == 3 || (i >= 9 && i <= 14))
2432 continue;
2433 val = nlge_mii_read_internal(sc->pcs_addr, phy, i);
2434 printf("PHY:%d pcs[0x%x] is 0x%x\n", phy, i, val);
2435 }
2436}
2437#endif