1/*-
2 * Copyright (c) 2003-2009 RMI Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * RMI_BSD */
30
31/*
32 * The XLR device supports up to four 10/100/1000 Ethernet MACs and up to
33 * two 10G Ethernet MACs (of XGMII). Alternatively, each 10G port can be used
34 * as a SPI-4 interface, with 8 ports per such interface. The MACs are
35 * encapsulated in another hardware block referred to as a network accelerator,
36 * such that there are three instances of these in an XLR. One of them controls
37 * the four 1G RGMII ports while each of the others controls an XGMII port.
38 * Enabling MACs requires configuring the corresponding network accelerator
39 * and the individual port.
40 * The XLS device supports up to eight 10/100/1000 Ethernet MACs or up to two
41 * 10G Ethernet MACs. The 1G MACs use the SGMII interface and the 10G MACs
42 * use the XAUI interface. These ports are part of two network accelerators.
43 * The nlge driver configures and initializes non-SPI4 Ethernet ports in the
44 * XLR/XLS devices and enables data transfer on them.
45 */
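/*
 * For reference, the newbus hierarchy this file sets up (see the
 * MODULE_DEPEND/DRIVER_MODULE declarations below): the "iodi" bus
 * enumerates one "nlna" device per network accelerator, each "nlna"
 * adds one "nlge" child per MAC port, and each "nlge" port may in turn
 * host a "miibus" instance for its PHY (see nlge_mii_init()).
 */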
46
47#include <sys/cdefs.h>
48__FBSDID("$FreeBSD: head/sys/mips/rmi/dev/nlge/if_nlge.c 212553 2010-09-13 13:11:50Z jchandra $");
48__FBSDID("$FreeBSD: head/sys/mips/rmi/dev/nlge/if_nlge.c 212758 2010-09-16 19:13:55Z jchandra $");
49
50#ifdef HAVE_KERNEL_OPTION_HEADERS
51#include "opt_device_polling.h"
52#endif
53
54#include <sys/endian.h>
55#include <sys/systm.h>
56#include <sys/sockio.h>
57#include <sys/param.h>
58#include <sys/lock.h>
59#include <sys/mutex.h>
60#include <sys/proc.h>
61#include <sys/limits.h>
62#include <sys/bus.h>
63#include <sys/mbuf.h>
64#include <sys/malloc.h>
65#include <sys/kernel.h>
66#include <sys/module.h>
67#include <sys/socket.h>
68#define __RMAN_RESOURCE_VISIBLE
69#include <sys/rman.h>
70#include <sys/taskqueue.h>
71#include <sys/smp.h>
72#include <sys/sysctl.h>
73
74#include <net/if.h>
75#include <net/if_arp.h>
76#include <net/ethernet.h>
77#include <net/if_dl.h>
78#include <net/if_media.h>
79#include <net/bpf.h>
80#include <net/if_types.h>
81#include <net/if_vlan_var.h>
82
83#include <netinet/in_systm.h>
84#include <netinet/in.h>
85#include <netinet/ip.h>
86
87#include <vm/vm.h>
88#include <vm/pmap.h>
89#include <vm/uma.h>
90
91#include <machine/reg.h>
92#include <machine/cpu.h>
93#include <machine/mips_opcode.h>
94#include <machine/asm.h>
95#include <machine/cpuregs.h>
96#include <machine/param.h>
97#include <machine/intr_machdep.h>
98#include <machine/clock.h> /* for DELAY */
99#include <machine/bus.h>
100#include <machine/resource.h>
101
102#include <mips/rmi/interrupt.h>
103#include <mips/rmi/msgring.h>
104#include <mips/rmi/iomap.h>
105#include <mips/rmi/pic.h>
106#include <mips/rmi/board.h>
107#include <mips/rmi/rmi_mips_exts.h>
108#include <mips/rmi/rmi_boot_info.h>
109#include <mips/rmi/dev/xlr/atx_cpld.h>
110#include <mips/rmi/dev/xlr/xgmac_mdio.h>
111
112#include <dev/mii/mii.h>
113#include <dev/mii/miivar.h>
114#include "miidevs.h"
115#include <dev/mii/brgphyreg.h>
116#include "miibus_if.h"
117
118#include <mips/rmi/dev/nlge/if_nlge.h>
119
120MODULE_DEPEND(nlna, nlge, 1, 1, 1);
121MODULE_DEPEND(nlge, ether, 1, 1, 1);
122MODULE_DEPEND(nlge, miibus, 1, 1, 1);
123
124/* Network accelerator entry points */
125static int nlna_probe(device_t);
126static int nlna_attach(device_t);
127static int nlna_detach(device_t);
128static int nlna_suspend(device_t);
129static int nlna_resume(device_t);
130static int nlna_shutdown(device_t);
131
132/* GMAC port entry points */
133static int nlge_probe(device_t);
134static int nlge_attach(device_t);
135static int nlge_detach(device_t);
136static int nlge_suspend(device_t);
137static int nlge_resume(device_t);
138static void nlge_init(void *);
139static int nlge_ioctl(struct ifnet *, u_long, caddr_t);
140static void nlge_start(struct ifnet *);
141static void nlge_rx(struct nlge_softc *sc, vm_paddr_t paddr, int len);
142
143static int nlge_mii_write(struct device *, int, int, int);
144static int nlge_mii_read(struct device *, int, int);
145static void nlge_mac_mii_statchg(device_t);
146static int nlge_mediachange(struct ifnet *ifp);
147static void nlge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
148
149/* Other internal/helper functions */
150static void *get_buf(void);
151
152static void nlna_add_to_port_set(struct nlge_port_set *pset,
153 struct nlge_softc *sc);
154static void nlna_config_pde(struct nlna_softc *);
155static void nlna_config_parser(struct nlna_softc *);
156static void nlna_config_classifier(struct nlna_softc *);
157static void nlna_config_fifo_spill_area(struct nlna_softc *sc);
158static void nlna_config_common(struct nlna_softc *);
159static void nlna_disable_ports(struct nlna_softc *sc);
160static void nlna_enable_intr(struct nlna_softc *sc);
161static void nlna_disable_intr(struct nlna_softc *sc);
162static void nlna_enable_ports(struct nlna_softc *sc);
163static void nlna_get_all_softc(device_t iodi_dev,
164 struct nlna_softc **sc_vec, uint32_t vec_sz);
165static void nlna_hw_init(struct nlna_softc *sc);
166static int nlna_is_last_active_na(struct nlna_softc *sc);
167static void nlna_media_specific_config(struct nlna_softc *sc);
168static void nlna_reset_ports(struct nlna_softc *sc,
169 struct xlr_gmac_block_t *blk);
170static struct nlna_softc *nlna_sc_init(device_t dev,
171 struct xlr_gmac_block_t *blk);
172static void nlna_setup_intr(struct nlna_softc *sc);
173static void nlna_smp_update_pde(void *dummy __unused);
174static void nlna_submit_rx_free_desc(struct nlna_softc *sc,
175 uint32_t n_desc);
176
177static int nlge_gmac_config_speed(struct nlge_softc *, int quick);
178static void nlge_hw_init(struct nlge_softc *sc);
179static int nlge_if_init(struct nlge_softc *sc);
180static void nlge_intr(void *arg);
181static int nlge_irq_init(struct nlge_softc *sc);
182static void nlge_irq_fini(struct nlge_softc *sc);
183static void nlge_media_specific_init(struct nlge_softc *sc);
184static void nlge_mii_init(device_t dev, struct nlge_softc *sc);
185static int nlge_mii_read_internal(xlr_reg_t *mii_base, int phyaddr,
186 int regidx);
187static void nlge_mii_write_internal(xlr_reg_t *mii_base, int phyaddr,
188 int regidx, int regval);
189void nlge_msgring_handler(int bucket, int size, int code,
190 int stid, struct msgrng_msg *msg, void *data);
191static void nlge_port_disable(int id, xlr_reg_t *base, int port_type);
192static void nlge_port_enable(struct nlge_softc *sc);
193static void nlge_read_mac_addr(struct nlge_softc *sc);
194static void nlge_sc_init(struct nlge_softc *sc, device_t dev,
195 struct xlr_gmac_port *port_info);
196static void nlge_set_mac_addr(struct nlge_softc *sc);
197static void nlge_set_port_attribs(struct nlge_softc *,
198 struct xlr_gmac_port *);
199static void nlge_sgmii_init(struct nlge_softc *sc);
200static void nlge_start_locked(struct ifnet *ifp, struct nlge_softc *sc);
201
202static int prepare_fmn_message(struct nlge_softc *sc,
203 struct msgrng_msg *msg, uint32_t *n_entries, struct mbuf *m_head,
204 uint64_t fr_stid, struct nlge_tx_desc **tx_desc);
205
206static void release_tx_desc(vm_paddr_t phy_addr);
207static int send_fmn_msg_tx(struct nlge_softc *, struct msgrng_msg *,
208 uint32_t n_entries);
209
210//#define DEBUG
211#ifdef DEBUG
212static int mac_debug = 1;
213static int reg_dump = 0;
214#undef PDEBUG
215#define PDEBUG(fmt, args...) \
216 do {\
217 if (mac_debug) {\
218 printf("[%s@%d|%s]: cpu_%d: " fmt, \
219 __FILE__, __LINE__, __FUNCTION__, PCPU_GET(cpuid), ##args);\
220 }\
221 } while(0);
222
223/* Debug/dump functions */
224static void dump_reg(xlr_reg_t *addr, uint32_t offset, char *name);
225static void dump_gmac_registers(struct nlge_softc *);
226static void dump_na_registers(xlr_reg_t *base, int port_id);
227static void dump_mac_stats(struct nlge_softc *sc);
228static void dump_mii_regs(struct nlge_softc *sc) __attribute__((used));
229static void dump_mii_data(struct mii_data *mii) __attribute__((used));
230static void dump_board_info(struct xlr_board_info *);
231static void dump_pcs_regs(struct nlge_softc *sc, int phy);
232
233#else
234#undef PDEBUG
235#define PDEBUG(fmt, args...)
236#define dump_reg(a, o, n) /* nop */
237#define dump_gmac_registers(a) /* nop */
238#define dump_na_registers(a, p) /* nop */
239#define dump_board_info(b) /* nop */
240#define dump_mac_stats(sc) /* nop */
241#define dump_mii_regs(sc) /* nop */
242#define dump_mii_data(mii) /* nop */
243#define dump_pcs_regs(sc, phy) /* nop */
244#endif
245
246/* Wrappers etc. to export the driver entry points. */
247static device_method_t nlna_methods[] = {
248 /* Device interface */
249 DEVMETHOD(device_probe, nlna_probe),
250 DEVMETHOD(device_attach, nlna_attach),
251 DEVMETHOD(device_detach, nlna_detach),
252 DEVMETHOD(device_shutdown, nlna_shutdown),
253 DEVMETHOD(device_suspend, nlna_suspend),
254 DEVMETHOD(device_resume, nlna_resume),
255
256 /* bus interface : TBD : what are these for ? */
257 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
258 DEVMETHOD(bus_print_child, bus_generic_print_child),
259 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
260
261 { 0, 0 }
262};
263
264static driver_t nlna_driver = {
265 "nlna",
266 nlna_methods,
267 sizeof(struct nlna_softc)
268};
269
270static devclass_t nlna_devclass;
271
272static device_method_t nlge_methods[] = {
273 /* Device interface */
274 DEVMETHOD(device_probe, nlge_probe),
275 DEVMETHOD(device_attach, nlge_attach),
276 DEVMETHOD(device_detach, nlge_detach),
277 DEVMETHOD(device_shutdown, bus_generic_shutdown),
278 DEVMETHOD(device_suspend, nlge_suspend),
279 DEVMETHOD(device_resume, nlge_resume),
280
281 /* MII interface */
282 DEVMETHOD(miibus_readreg, nlge_mii_read),
283 DEVMETHOD(miibus_writereg, nlge_mii_write),
284 DEVMETHOD(miibus_statchg, nlge_mac_mii_statchg),
285
286 {0, 0}
287};
288
289static driver_t nlge_driver = {
290 "nlge",
291 nlge_methods,
292 sizeof(struct nlge_softc)
293};
294
295static devclass_t nlge_devclass;
296
297DRIVER_MODULE(nlna, iodi, nlna_driver, nlna_devclass, 0, 0);
298DRIVER_MODULE(nlge, nlna, nlge_driver, nlge_devclass, 0, 0);
299DRIVER_MODULE(miibus, nlge, miibus_driver, miibus_devclass, 0, 0);
300
301static uma_zone_t nl_tx_desc_zone;
302
303/* Function to atomically increment an integer with the given value. */
304static __inline__ unsigned int
305ldadd_wu(unsigned int value, unsigned long *addr)
303static __inline void
304atomic_incr_long(unsigned long *addr)
305{
307 __asm__ __volatile__( ".set push\n"
308 ".set noreorder\n"
309 "move $8, %2\n"
310 "move $9, %3\n"
311 /* "ldaddwu $8, $9\n" */
312 ".word 0x71280011\n"
313 "move %0, $8\n"
314 ".set pop\n"
315 : "=&r"(value), "+m"(*addr)
316 : "0"(value), "r" ((unsigned long)addr)
317 : "$8", "$9");
318 return value;
319}
306 /* XXX: fix for 64 bit */
307 unsigned int *iaddr = (unsigned int *)addr;
308
321static __inline__ uint32_t
322xlr_enable_kx(void)
323{
324 uint32_t sr = mips_rd_status();
325
326 mips_wr_status((sr & ~MIPS_SR_INT_IE) | MIPS_SR_KX);
327 return sr;
309 xlr_ldaddwu(1, iaddr);
310}
311
312static int
313nlna_probe(device_t dev)
314{
315 return (BUS_PROBE_DEFAULT);
316}
317
318/*
319 * Add all attached GMAC/XGMAC ports to the device tree. Port
320 * configuration is spread across two regions - common configuration
321 * for all ports in the NA and per-port configuration in a MAC-specific
322 * region. This function does the following:
323 * - adds the ports to the device tree
324 * - resets the ports
325 * - does all the common initialization
326 * - invokes bus_generic_attach for per-port configuration
327 * - supplies the initial pool of free rx descriptors to the ports
328 * - initializes s/w data structures
329 * - finally, enables interrupts (only in the last NA).
330 *
331 * For reference, sample address space for common and per-port
332 * registers is given below.
333 *
334 * The address map for RNA0 is: (typical value)
335 *
336 * XLR_IO_BASE +--------------------------------------+ 0xbef0_0000
337 * | |
338 * | |
339 * | |
340 * | |
341 * | |
342 * | |
343 * GMAC0 ---> +--------------------------------------+ 0xbef0_c000
344 * | |
345 * | |
346 * (common) -> |......................................| 0xbef0_c400
347 * | |
348 * | (RGMII/SGMII: common registers) |
349 * | |
350 * GMAC1 ---> |--------------------------------------| 0xbef0_d000
351 * | |
352 * | |
353 * (common) -> |......................................| 0xbef0_d400
354 * | |
355 * | (RGMII/SGMII: common registers) |
356 * | |
357 * |......................................|
358 * and so on ....
359 *
360 * Ref: Figure 14-3 and Table 14-1 of XLR PRM
361 */
362static int
363nlna_attach(device_t dev)
364{
365 struct xlr_gmac_block_t *block_info;
366 device_t gmac_dev;
367 struct nlna_softc *sc;
368 int error;
369 int i;
370 int id;
371
372 id = device_get_unit(dev);
373 block_info = device_get_ivars(dev);
374 if (!block_info->enabled) {
375 return 0;
376 }
377
378#ifdef DEBUG
379 dump_board_info(&xlr_board_info);
380#endif
381 block_info->baseaddr += DEFAULT_XLR_IO_BASE;
382
383 /* Initialize nlna state in softc structure */
384 sc = nlna_sc_init(dev, block_info);
385
386 /* Add devices for the ports controlled by this NA. */
387 if (block_info->type == XLR_GMAC) {
388 KASSERT(id < 2, ("No GMACs supported with this network "
389 "accelerator: %d", id));
390 for (i = 0; i < sc->num_ports; i++) {
391 gmac_dev = device_add_child(dev, "nlge", -1);
392 device_set_ivars(gmac_dev, &block_info->gmac_port[i]);
393 }
394 } else if (block_info->type == XLR_XGMAC) {
395 KASSERT(id > 0 && id <= 2, ("No XGMACs supported with this "
396 "network accelerator: %d", id));
397 gmac_dev = device_add_child(dev, "nlge", -1);
398 device_set_ivars(gmac_dev, &block_info->gmac_port[0]);
399 } else if (block_info->type == XLR_SPI4) {
400 /* SPI4 is not supported here */
401 device_printf(dev, "Unsupported: NA with SPI4 type");
402 return (ENOTSUP);
403 }
404
405 nlna_reset_ports(sc, block_info);
406
407 /* Initialize Network Accelerator registers. */
408 nlna_hw_init(sc);
409
410 error = bus_generic_attach(dev);
411 if (error) {
412 device_printf(dev, "failed to attach port(s)\n");
413 goto fail;
414 }
415
416 /* Send out the initial pool of free-descriptors for the rx path */
417 nlna_submit_rx_free_desc(sc, MAX_FRIN_SPILL);
418
419 /* S/w data structure initializations shared by all NA's. */
420 if (nl_tx_desc_zone == NULL) {
421 /* Create a zone for allocating tx descriptors */
422 nl_tx_desc_zone = uma_zcreate("NL Tx Desc",
423 sizeof(struct nlge_tx_desc), NULL, NULL, NULL, NULL,
424 XLR_CACHELINE_SIZE, 0);
425 }
426
427 /* Enable NA interrupts */
428 nlna_setup_intr(sc);
429
430 return (0);
431
432fail:
433 return (error);
434}
435
436static int
437nlna_detach(device_t dev)
438{
439 struct nlna_softc *sc;
440
441 sc = device_get_softc(dev);
442 if (device_is_alive(dev)) {
443 nlna_disable_intr(sc);
444 /* This will make sure that per-port detach is complete
445 * and all traffic on the ports has been stopped. */
446 bus_generic_detach(dev);
447 uma_zdestroy(nl_tx_desc_zone);
448 }
449
450 return (0);
451}
452
453static int
454nlna_suspend(device_t dev)
455{
456
457 return (0);
458}
459
460static int
461nlna_resume(device_t dev)
462{
463
464 return (0);
465}
466
467static int
468nlna_shutdown(device_t dev)
469{
470 return (0);
471}
472
473
474/* GMAC port entry points */
475static int
476nlge_probe(device_t dev)
477{
478 struct nlge_softc *sc;
479 struct xlr_gmac_port *port_info;
480 int index;
481 char *desc[] = { "RGMII", "SGMII", "RGMII/SGMII", "XGMAC", "XAUI",
482 "Unknown"};
483
484 port_info = device_get_ivars(dev);
485 index = (port_info->type < XLR_RGMII || port_info->type > XLR_XAUI) ?
486 5 : port_info->type;
487 device_set_desc_copy(dev, desc[index]);
488
489 sc = device_get_softc(dev);
490 nlge_sc_init(sc, dev, port_info);
491
492 nlge_port_disable(sc->id, sc->base, sc->port_type);
493
494 return (0);
495}
496
497static int
498nlge_attach(device_t dev)
499{
500 struct nlge_softc *sc;
501 struct nlna_softc *nsc;
502 int error;
503
504 sc = device_get_softc(dev);
505
506 nlge_if_init(sc);
507 nlge_mii_init(dev, sc);
508 error = nlge_irq_init(sc);
509 if (error)
510 return error;
511 nlge_hw_init(sc);
512
513 nsc = (struct nlna_softc *)device_get_softc(device_get_parent(dev));
514 nsc->child_sc[sc->instance] = sc;
515
516 return (0);
517}
518
519static int
520nlge_detach(device_t dev)
521{
522 struct nlge_softc *sc;
523 struct ifnet *ifp;
524
525 sc = device_get_softc(dev);
526 ifp = sc->nlge_if;
527
528 if (device_is_attached(dev)) {
529 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
530 nlge_port_disable(sc->id, sc->base, sc->port_type);
531 nlge_irq_fini(sc);
532 ether_ifdetach(ifp);
533 bus_generic_detach(dev);
534 }
535 if (ifp)
536 if_free(ifp);
537
538 return (0);
539}
540
541static int
542nlge_suspend(device_t dev)
543{
544 return (0);
545}
546
547static int
548nlge_resume(device_t dev)
549{
550 return (0);
551}
552
553static void
554nlge_init(void *addr)
555{
556 struct nlge_softc *sc;
557 struct ifnet *ifp;
558
559 sc = (struct nlge_softc *)addr;
560 ifp = sc->nlge_if;
561
562 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
563 return;
564
565 nlge_gmac_config_speed(sc, 0);
566 ifp->if_drv_flags |= IFF_DRV_RUNNING;
567 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
568 nlge_port_enable(sc);
569
570 if (sc->port_type == XLR_SGMII) {
571 dump_pcs_regs(sc, 27);
572 }
573 dump_gmac_registers(sc);
574 dump_mac_stats(sc);
575}
576
577static int
578nlge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
579{
580 struct mii_data *mii;
581 struct nlge_softc *sc;
582 struct ifreq *ifr;
583 int error;
584
585 sc = ifp->if_softc;
586 error = 0;
587 ifr = (struct ifreq *)data;
588 switch(command) {
589 case SIOCSIFFLAGS:
590 break;
591 case SIOCSIFMEDIA:
592 case SIOCGIFMEDIA:
593 if (sc->mii_bus != NULL) {
594 mii = (struct mii_data *)device_get_softc(sc->mii_bus);
595 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
596 command);
597 }
598 break;
599 case SIOCSIFADDR:
600 // intentional fall thru
601 case SIOCSIFMTU:
602 default:
603 error = ether_ioctl(ifp, command, data);
604 break;
605 }
606
607 return (error);
608}
609
610/* This function is called from an interrupt handler */
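/*
 * For reference, the msg0 fields decoded below. A length of zero in
 * bits 53:40 marks a tx free-back message; otherwise it is a received
 * packet.
 *   tx free-back: bit 62 = p2p descriptor flag, bits 61:58 = tx error,
 *                 bits 57:54 = port, bits 39:0 = buffer/descriptor paddr.
 *   rx packet:    bits 39:5 = buffer paddr (32-byte aligned),
 *                 bits 3:0 = source port.
 */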
611void
612nlge_msgring_handler(int bucket, int size, int code, int stid,
613 struct msgrng_msg *msg, void *data)
614{
615 struct nlna_softc *na_sc;
616 struct nlge_softc *sc;
617 struct ifnet *ifp;
618 vm_paddr_t phys_addr;
619 unsigned long addr;
620 uint32_t length;
621 int ctrl;
622 int cpu;
623 int tx_error;
624 int port;
625 int vcpu;
626 int is_p2p;
627
628 cpu = xlr_core_id();
629 vcpu = (cpu << 2) + xlr_thr_id();
630
631 addr = 0;
632 is_p2p = 0;
633 tx_error = 0;
634 length = (msg->msg0 >> 40) & 0x3fff;
635 na_sc = (struct nlna_softc *)data;
636 if (length == 0) {
637 ctrl = CTRL_REG_FREE;
638 phys_addr = msg->msg0 & 0xffffffffffULL;
639 port = (msg->msg0 >> 54) & 0x0f;
640 is_p2p = (msg->msg0 >> 62) & 0x1;
641 tx_error = (msg->msg0 >> 58) & 0xf;
642 } else {
643 ctrl = CTRL_SNGL;
644 phys_addr = msg->msg0 & 0xffffffffe0ULL;
645 length = length - BYTE_OFFSET - MAC_CRC_LEN;
646 port = msg->msg0 & 0x0f;
647 }
648
649 sc = na_sc->child_sc[port];
650 if (sc == NULL) {
651 printf("Message (of %d len) with softc=NULL on %d port (type=%s)\n",
652 length, port, (ctrl == CTRL_SNGL ? "Pkt rx" :
653 "Freeback for tx packet"));
654 return;
655 }
656
657 if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
658 if (is_p2p) {
659 release_tx_desc(phys_addr);
660 } else {
661 m_freem((struct mbuf *)(uintptr_t)phys_addr);
662 }
663
664 ifp = sc->nlge_if;
665 if (ifp->if_drv_flags & IFF_DRV_OACTIVE){
666 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
667 }
686 ldadd_wu(1, (tx_error) ? &ifp->if_oerrors: &ifp->if_opackets);
668 atomic_incr_long((tx_error) ? &ifp->if_oerrors: &ifp->if_opackets);
669 } else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
670 /* Rx Packet */
671
672 nlge_rx(sc, phys_addr, length);
673 nlna_submit_rx_free_desc(na_sc, 1); /* return free descr to NA */
674 } else {
675 printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
676 }
677
678}
679
680static void
681nlge_start(struct ifnet *ifp)
682{
683 struct nlge_softc *sc;
684
685 sc = ifp->if_softc;
686 //NLGE_LOCK(sc);
687 nlge_start_locked(ifp, sc);
688 //NLGE_UNLOCK(sc);
689}
690
691static void
692nlge_start_locked(struct ifnet *ifp, struct nlge_softc *sc)
693{
694 struct msgrng_msg msg;
695 struct mbuf *m;
696 struct nlge_tx_desc *tx_desc;
697 uint64_t fr_stid;
698 uint32_t cpu;
699 uint32_t n_entries;
700 uint32_t tid;
701 int ret;
702 int sent;
703
704 cpu = xlr_core_id();
705 tid = xlr_thr_id();
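	/*
	 * Free-back station: bucket (4 + thread id) of the sending core
	 * (each core owns 8 buckets, see nlna_config_pde()), presumably so
	 * the tx free-back message returns to the CPU that queued the packet.
	 */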
706 fr_stid = cpu * 8 + tid + 4;
707
708 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
709 return;
710 }
711
712 do {
713 /* Grab a packet off the queue. */
714 IF_DEQUEUE(&ifp->if_snd, m);
715 if (m == NULL) {
716 return;
717 }
718
719 tx_desc = NULL;
720 ret = prepare_fmn_message(sc, &msg, &n_entries, m, fr_stid, &tx_desc);
721 if (ret) {
722 goto fail;
723 }
724 sent = send_fmn_msg_tx(sc, &msg, n_entries);
725 if (sent != 0) {
726 goto fail;
727 }
728 } while(1);
729
730 return;
731
732fail:
733 if (tx_desc != NULL) {
734 uma_zfree(nl_tx_desc_zone, tx_desc);
735 }
736 if (m != NULL) {
737 /*
738 * TBD: It is observed that traffic continues to completion only when
739 * both of the statements below are disabled. Otherwise, the port
740 * locks up in the middle and never recovers from it. The current
741 * theory for this behavior is that the queue is full and the upper
742 * layer is neither able to add to it nor to invoke nlge_start to
743 * drain the queue. The driver may have to do something in addition
744 * to resetting the OACTIVE bit when a transmit free-back is
745 * received.
747 */
748 //ifp->if_drv_flags |= IFF_DRV_OACTIVE;
749 //IF_PREPEND(&ifp->if_snd, m);
750 m_freem(m);
769 ldadd_wu(1, &ifp->if_iqdrops);
751 atomic_incr_long(&ifp->if_iqdrops);
752 }
753 return;
754}
755
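/*
 * For reference: get_buf() below stores, in the cacheline that precedes
 * the packet data handed to the NA, a back-pointer to the owning mbuf
 * followed by the magic value 0xf00bad. nlge_rx() reads those two words
 * back from (paddr - XLR_CACHELINE_SIZE) to recover the mbuf and to
 * sanity-check that the buffer was really allocated by this driver.
 */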
756static void
757nlge_rx(struct nlge_softc *sc, vm_paddr_t paddr, int len)
758{
777 struct ifnet *ifp;
778 struct mbuf *m;
779 uint32_t tm, mag, sr;
759 struct ifnet *ifp;
760 struct mbuf *m;
761 uint64_t tm, mag;
762 uint32_t sr;
763
764 sr = xlr_enable_kx();
782 tm = xlr_paddr_lw(paddr - XLR_CACHELINE_SIZE);
783 mag = xlr_paddr_lw(paddr - XLR_CACHELINE_SIZE + sizeof(uint32_t));
784 mips_wr_status(sr);
765 tm = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
766 mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
767 xlr_restore_kx(sr);
768
769 m = (struct mbuf *)(intptr_t)tm;
770 if (mag != 0xf00bad) {
771 /* somebody else's packet. Error - FIXME in initialization */
772 printf("cpu %d: *ERROR* Not my packet paddr %llx\n",
773 xlr_core_id(), (uint64_t) paddr);
774 return;
775 }
776
777 ifp = sc->nlge_if;
778 /* align the data */
779 m->m_data += BYTE_OFFSET;
780 m->m_pkthdr.len = m->m_len = len;
781 m->m_pkthdr.rcvif = ifp;
782
800 ldadd_wu(1, &ifp->if_ipackets);
783 atomic_incr_long(&ifp->if_ipackets);
784 (*ifp->if_input)(ifp, m);
785}
786
787static int
788nlge_mii_write(struct device *dev, int phyaddr, int regidx, int regval)
789{
790 struct nlge_softc *sc;
791
792 sc = device_get_softc(dev);
793 if (sc->phy_addr == phyaddr && sc->port_type != XLR_XGMII)
794 nlge_mii_write_internal(sc->mii_base, phyaddr, regidx, regval);
795
796 return (0);
797}
798
799static int
800nlge_mii_read(struct device *dev, int phyaddr, int regidx)
801{
802 struct nlge_softc *sc;
803 int val;
804
805 sc = device_get_softc(dev);
806 val = (sc->phy_addr != phyaddr && sc->port_type != XLR_XGMII) ? (0xffff) :
807 nlge_mii_read_internal(sc->mii_base, phyaddr, regidx);
808
809 return (val);
810}
811
812static void
813nlge_mac_mii_statchg(device_t dev)
814{
815}
816
817static int
818nlge_mediachange(struct ifnet *ifp)
819{
820 return 0;
821}
822
823static void
824nlge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
825{
826 struct nlge_softc *sc;
827 struct mii_data *md;
828
829 md = NULL;
830 sc = ifp->if_softc;
831 if (sc->mii_bus)
832 md = device_get_softc(sc->mii_bus);
833
834 ifmr->ifm_status = IFM_AVALID;
835 ifmr->ifm_active = IFM_ETHER;
836
837 if (sc->link == xlr_mac_link_down)
838 return;
839
840 if (md != NULL)
841 ifmr->ifm_active = md->mii_media.ifm_cur->ifm_media;
842 ifmr->ifm_status |= IFM_ACTIVE;
843}
844
845static struct nlna_softc *
846nlna_sc_init(device_t dev, struct xlr_gmac_block_t *blk)
847{
848 struct nlna_softc *sc;
849
850 sc = device_get_softc(dev);
851 memset(sc, 0, sizeof(*sc));
852 sc->nlna_dev = dev;
853 sc->base = (xlr_reg_t *) blk->baseaddr;
854 sc->rfrbucket = blk->station_rfr;
855 sc->station_id = blk->station_id;
856 sc->na_type = blk->type;
857 sc->mac_type = blk->mode;
858 sc->num_ports = blk->num_ports;
859
860 sc->mdio_set.port_vec = sc->mdio_sc;
861 sc->mdio_set.vec_sz = XLR_MAX_MACS;
862
863 return (sc);
864}
865
866/*
867 * Do:
868 * - Initialize common GMAC registers (index range 0x100-0x3ff).
869 */
870static void
871nlna_hw_init(struct nlna_softc *sc)
872{
873
874 /*
875 * It is seen that this is a critical function in bringing up FreeBSD.
876 * When it is not invoked, FreeBSD panics and fails during the
877 * multi-processor init (SI_SUB_SMP of mi_startup). The key function
878 * in this sequence seems to be platform_prep_smp_launch. */
879 if (register_msgring_handler(sc->station_id, nlge_msgring_handler, sc)) {
880 panic("Couldn't register msgring handler\n");
881 }
882 nlna_config_fifo_spill_area(sc);
883 nlna_config_pde(sc);
884 nlna_config_common(sc);
885 nlna_config_parser(sc);
886 nlna_config_classifier(sc);
887}
888
889/*
890 * Enable interrupts on all the ports controlled by this NA. For now, we
891 * only care about the MII interrupt, and this has to be enabled only
892 * on port 0 of each NA.
893 *
894 * This function is not in sync with the regular way of doing things - it
895 * executes only in the context of the last active network accelerator (and
896 * thereby has some ugly accesses in the device tree). Though inelegant, it
897 * is necessary to do it this way as the per-port interrupts can be
898 * set up/enabled only after all the network accelerators have been
899 * initialized.
900 */
901static void
902nlna_setup_intr(struct nlna_softc *sc)
903{
904 struct nlna_softc *na_sc[XLR_MAX_NLNA];
905 struct nlge_port_set *pset;
906 struct xlr_gmac_port *port_info;
907 device_t iodi_dev;
908 int i, j;
909
910 if (!nlna_is_last_active_na(sc))
911 return ;
912
913 /* Collect all nlna softc pointers */
914 memset(na_sc, 0, sizeof(*na_sc) * XLR_MAX_NLNA);
915 iodi_dev = device_get_parent(sc->nlna_dev);
916 nlna_get_all_softc(iodi_dev, na_sc, XLR_MAX_NLNA);
917
918 /* Setup the MDIO interrupt lists. */
919 /*
920 * MDIO interrupts are coarse - a single interrupt line provides
921 * information about one of many possible ports. To figure out the
922 * exact port on which action is to be taken, all of the ports
923 * linked to an MDIO interrupt should be read. To enable this,
924 * ports need to add themselves to port sets.
925 */
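	/*
	 * The sets built here are consumed in nlge_intr(): when R_INTREG
	 * reports MDInt, every port in the set has its PHY interrupt
	 * status register (reg 26) read to find out which link changed.
	 */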
926 for (i = 0; i < XLR_MAX_NLNA; i++) {
927 if (na_sc[i] == NULL)
928 continue;
929 for (j = 0; j < na_sc[i]->num_ports; j++) {
930 /* processing j-th port on i-th NA */
931 port_info = device_get_ivars(
932 na_sc[i]->child_sc[j]->nlge_dev);
933 pset = &na_sc[port_info->mdint_id]->mdio_set;
934 nlna_add_to_port_set(pset, na_sc[i]->child_sc[j]);
935 }
936 }
937
938 /* Enable interrupts */
939 for (i = 0; i < XLR_MAX_NLNA; i++) {
940 if (na_sc[i] != NULL && na_sc[i]->na_type != XLR_XGMAC) {
941 nlna_enable_intr(na_sc[i]);
942 }
943 }
944}
945
946static void
947nlna_add_to_port_set(struct nlge_port_set *pset, struct nlge_softc *sc)
948{
949 int i;
950
951 /* step past the non-NULL elements */
952 for (i = 0; i < pset->vec_sz && pset->port_vec[i] != NULL; i++) ;
953 if (i < pset->vec_sz)
954 pset->port_vec[i] = sc;
955 else
956 printf("warning: internal error: out-of-bounds for MDIO array");
957}
958
959static void
960nlna_enable_intr(struct nlna_softc *sc)
961{
962 int i;
963
964 for (i = 0; i < sc->num_ports; i++) {
965 if (sc->child_sc[i]->instance == 0)
966 NLGE_WRITE(sc->child_sc[i]->base, R_INTMASK,
967 (1 << O_INTMASK__MDInt));
968 }
969}
970
971static void
972nlna_disable_intr(struct nlna_softc *sc)
973{
974 int i;
975
976 for (i = 0; i < sc->num_ports; i++) {
977 if (sc->child_sc[i]->instance == 0)
978 NLGE_WRITE(sc->child_sc[i]->base, R_INTMASK, 0);
979 }
980}
981
982static int
983nlna_is_last_active_na(struct nlna_softc *sc)
984{
985 int id;
986
987 id = device_get_unit(sc->nlna_dev);
988 return (id == 2 || xlr_board_info.gmac_block[id + 1].enabled == 0);
989}
990
991static void
992nlna_submit_rx_free_desc(struct nlna_softc *sc, uint32_t n_desc)
993{
994 struct msgrng_msg msg;
995 void *ptr;
996 uint32_t msgrng_flags;
997 int i, n, stid, ret, code;
998
999 if (n_desc > 1) {
1000 PDEBUG("Sending %d free-in descriptors to station=%d\n", n_desc,
1001 sc->rfrbucket);
1002 }
1003
1004 stid = sc->rfrbucket;
1005 code = (sc->na_type == XLR_XGMAC) ? MSGRNG_CODE_XGMAC : MSGRNG_CODE_MAC;
1006 memset(&msg, 0, sizeof(msg));
1007
1008 for (i = 0; i < n_desc; i++) {
1009 ptr = get_buf();
1010 if (!ptr) {
1011 ret = -ENOMEM;
1012 device_printf(sc->nlna_dev, "Cannot allocate mbuf\n");
1013 break;
1014 }
1015
1016 /* Send the free Rx desc to the MAC */
1017 msg.msg0 = vtophys(ptr) & 0xffffffffe0ULL;
1018 n = 0;
1019 do {
1020 msgrng_flags = msgrng_access_enable();
1021 ret = message_send(1, code, stid, &msg);
1022 msgrng_restore(msgrng_flags);
1023 KASSERT(n++ < 100000, ("Too many credit fails\n"));
1024 } while (ret != 0);
1025 }
1026}
1027
1028static __inline__ void *
1029nlna_config_spill(xlr_reg_t *base, int reg_start_0, int reg_start_1,
1030 int reg_size, int size)
1031{
1032 void *spill;
1033 uint64_t phys_addr;
1034 uint32_t spill_size;
1035
1036 spill_size = size;
1037 spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
1038 M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
1039 if (spill == NULL || ((vm_offset_t) spill & (XLR_CACHELINE_SIZE - 1))) {
1040 panic("Unable to allocate memory for spill area!\n");
1041 }
1042 phys_addr = vtophys(spill);
1043 PDEBUG("Allocated spill %d bytes at %llx\n", size, phys_addr);
1044 NLGE_WRITE(base, reg_start_0, (phys_addr >> 5) & 0xffffffff);
1045 NLGE_WRITE(base, reg_start_1, (phys_addr >> 37) & 0x07);
1046 NLGE_WRITE(base, reg_size, spill_size);
1047
1048 return (spill);
1049}
1050
1051/*
1052 * Configure the six FIFOs that the network accelerator uses to
1053 * communicate with the rest of the XLR/XLS device. Four of the FIFOs
1054 * carry packets from the NA to the CPUs (the class FIFOs) and two
1055 * feed the NA with free descriptors.
1056 */
1057static void
1058nlna_config_fifo_spill_area(struct nlna_softc *sc)
1059{
1060 sc->frin_spill = nlna_config_spill(sc->base,
1061 R_REG_FRIN_SPILL_MEM_START_0,
1062 R_REG_FRIN_SPILL_MEM_START_1,
1063 R_REG_FRIN_SPILL_MEM_SIZE,
1064 MAX_FRIN_SPILL *
1065 sizeof(struct fr_desc));
1066 sc->frout_spill = nlna_config_spill(sc->base,
1067 R_FROUT_SPILL_MEM_START_0,
1068 R_FROUT_SPILL_MEM_START_1,
1069 R_FROUT_SPILL_MEM_SIZE,
1070 MAX_FROUT_SPILL *
1071 sizeof(struct fr_desc));
1072 sc->class_0_spill = nlna_config_spill(sc->base,
1073 R_CLASS0_SPILL_MEM_START_0,
1074 R_CLASS0_SPILL_MEM_START_1,
1075 R_CLASS0_SPILL_MEM_SIZE,
1076 MAX_CLASS_0_SPILL *
1077 sizeof(union rx_tx_desc));
1078 sc->class_1_spill = nlna_config_spill(sc->base,
1079 R_CLASS1_SPILL_MEM_START_0,
1080 R_CLASS1_SPILL_MEM_START_1,
1081 R_CLASS1_SPILL_MEM_SIZE,
1082 MAX_CLASS_1_SPILL *
1083 sizeof(union rx_tx_desc));
1084 sc->class_2_spill = nlna_config_spill(sc->base,
1085 R_CLASS2_SPILL_MEM_START_0,
1086 R_CLASS2_SPILL_MEM_START_1,
1087 R_CLASS2_SPILL_MEM_SIZE,
1088 MAX_CLASS_2_SPILL *
1089 sizeof(union rx_tx_desc));
1090 sc->class_3_spill = nlna_config_spill(sc->base,
1091 R_CLASS3_SPILL_MEM_START_0,
1092 R_CLASS3_SPILL_MEM_START_1,
1093 R_CLASS3_SPILL_MEM_SIZE,
1094 MAX_CLASS_3_SPILL *
1095 sizeof(union rx_tx_desc));
1096}
1097
1098/* Set the CPU buckets that receive packets from the NA class FIFOs. */
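/*
 * Illustration (hypothetical thread mask, derived from the loop below):
 * only bucket 0 of each enabled core is selected, i.e.
 * bucket = (cpu >> 2) << 3. With the first four cores fully enabled
 * (cpumask 0xffff) the bucket map comes out as 0x01010101, i.e.
 * buckets 0, 8, 16 and 24.
 */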
1099static void
1100nlna_config_pde(struct nlna_softc *sc)
1101{
1102 uint64_t bucket_map;
1103 uint32_t cpumask;
1104 int i, cpu, bucket;
1105
1106 cpumask = 0x1;
1107#ifdef SMP
1108 /*
1109 * nlge may be called before SMP start in a BOOTP/NFSROOT
1110 * setup. We will distribute packets to other cpus only after
1111 * SMP has started.
1112 */
1113 if (smp_started)
1114 cpumask = xlr_hw_thread_mask;
1115#endif
1116
1117 bucket_map = 0;
1118 for (i = 0; i < 32; i++) {
1119 if (cpumask & (1 << i)) {
1120 cpu = i;
1121 bucket = ((cpu >> 2) << 3);
1122 bucket_map |= (1ULL << bucket);
1123 }
1124 }
1125 NLGE_WRITE(sc->base, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
1126 NLGE_WRITE(sc->base, R_PDE_CLASS_0 + 1, ((bucket_map >> 32) & 0xffffffff));
1127
1128 NLGE_WRITE(sc->base, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
1129 NLGE_WRITE(sc->base, R_PDE_CLASS_1 + 1, ((bucket_map >> 32) & 0xffffffff));
1130
1131 NLGE_WRITE(sc->base, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
1132 NLGE_WRITE(sc->base, R_PDE_CLASS_2 + 1, ((bucket_map >> 32) & 0xffffffff));
1133
1134 NLGE_WRITE(sc->base, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
1135 NLGE_WRITE(sc->base, R_PDE_CLASS_3 + 1, ((bucket_map >> 32) & 0xffffffff));
1136}
1137
1138static void
1139nlna_smp_update_pde(void *dummy __unused)
1140{
1141 device_t iodi_dev;
1142 struct nlna_softc *na_sc[XLR_MAX_NLNA];
1143 int i;
1144
1145 printf("Updating packet distribution for SMP\n");
1146
1147 iodi_dev = devclass_get_device(devclass_find("iodi"), 0);
1148 nlna_get_all_softc(iodi_dev, na_sc, XLR_MAX_NLNA);
1149
1150 for (i = 0; i < XLR_MAX_NLNA; i++) {
1151 if (na_sc[i] == NULL)
1152 continue;
1153 nlna_disable_ports(na_sc[i]);
1154 nlna_config_pde(na_sc[i]);
1155 nlna_enable_ports(na_sc[i]);
1156 }
1157}
1158
1159SYSINIT(nlna_smp_update_pde, SI_SUB_SMP, SI_ORDER_ANY, nlna_smp_update_pde,
1160 NULL);
1161
1162static void
1163nlna_config_parser(struct nlna_softc *sc)
1164{
1165 /*
1166 * Mark it as no classification. The parser extract is guaranteed to
1167 * be zero with no classification.
1168 */
1169 NLGE_WRITE(sc->base, R_L2TYPE_0, 0x00);
1170 NLGE_WRITE(sc->base, R_L2TYPE_0, 0x01);
1171
1172 /* configure the parser : L2 Type is configured in the bootloader */
1173 /* extract IP: src, dest protocol */
1174 NLGE_WRITE(sc->base, R_L3CTABLE,
1175 (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
1176 (0x0800 << 0));
1177 NLGE_WRITE(sc->base, R_L3CTABLE + 1,
1178 (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));
1179}
1180
1181static void
1182nlna_config_classifier(struct nlna_softc *sc)
1183{
1184 int i;
1185
1186 if (sc->mac_type == XLR_XGMII) { /* TBD: XGMII init sequence */
1187 /* xgmac translation table doesn't have sane values on reset */
1188 for (i = 0; i < 64; i++)
1189 NLGE_WRITE(sc->base, R_TRANSLATETABLE + i, 0x0);
1190
1191 /*
1192 * use upper 7 bits of the parser extract to index the
1193 * translate table
1194 */
1195 NLGE_WRITE(sc->base, R_PARSERCONFIGREG, 0x0);
1196 }
1197}
1198
1199/*
1200 * Complete a bunch of h/w register initializations that are common to all
1201 * the ports controlled by an NA.
1202 */
1203static void
1204nlna_config_common(struct nlna_softc *sc)
1205{
1206 struct xlr_gmac_block_t *block_info;
1207 struct stn_cc *gmac_cc_config;
1208 int i, id;
1209
1210 block_info = device_get_ivars(sc->nlna_dev);
1211
1212 id = device_get_unit(sc->nlna_dev);
1213 gmac_cc_config = block_info->credit_config;
1214 for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1215 NLGE_WRITE(sc->base, R_CC_CPU0_0 + i,
1216 gmac_cc_config->counters[i >> 3][i & 0x07]);
1217 }
1218
1219 NLGE_WRITE(sc->base, R_MSG_TX_THRESHOLD, 3);
1220
1221 NLGE_WRITE(sc->base, R_DMACR0, 0xffffffff);
1222 NLGE_WRITE(sc->base, R_DMACR1, 0xffffffff);
1223 NLGE_WRITE(sc->base, R_DMACR2, 0xffffffff);
1224 NLGE_WRITE(sc->base, R_DMACR3, 0xffffffff);
1225 NLGE_WRITE(sc->base, R_FREEQCARVE, 0);
1226
1227 nlna_media_specific_config(sc);
1228}
1229
1230static void
1231nlna_media_specific_config(struct nlna_softc *sc)
1232{
1233 struct bucket_size *bucket_sizes;
1234
1235 bucket_sizes = xlr_board_info.bucket_sizes;
1236 switch (sc->mac_type) {
1237 case XLR_RGMII:
1238 case XLR_SGMII:
1239 case XLR_XAUI:
1240 NLGE_WRITE(sc->base, R_GMAC_JFR0_BUCKET_SIZE,
1241 bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
1242 NLGE_WRITE(sc->base, R_GMAC_RFR0_BUCKET_SIZE,
1243 bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
1244 NLGE_WRITE(sc->base, R_GMAC_JFR1_BUCKET_SIZE,
1245 bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
1246 NLGE_WRITE(sc->base, R_GMAC_RFR1_BUCKET_SIZE,
1247 bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);
1248
1249 if (sc->mac_type == XLR_XAUI) {
1250 NLGE_WRITE(sc->base, R_TXDATAFIFO0, (224 << 16));
1251 }
1252 break;
1253
1254 case XLR_XGMII:
1255 NLGE_WRITE(sc->base, R_XGS_RFR_BUCKET_SIZE,
1256 bucket_sizes->bucket[sc->rfrbucket]);
1257
1258 default:
1259 break;
1260 }
1261}
1262
1263static void
1264nlna_reset_ports(struct nlna_softc *sc, struct xlr_gmac_block_t *blk)
1265{
1266 xlr_reg_t *addr;
1267 int i;
1268 uint32_t rx_ctrl;
1269
1270 /* Refer to Section 13.9.3 in the PRM for the reset sequence */
1271
1272 for (i = 0; i < sc->num_ports; i++) {
1273 uint32_t base = (uint32_t)DEFAULT_XLR_IO_BASE;
1274
1275 base += blk->gmac_port[i].base_addr;
1276 addr = (xlr_reg_t *) base;
1277
1278 /* 1. Reset RxEnable in MAC_CONFIG */
1279 switch (sc->mac_type) {
1280 case XLR_RGMII:
1281 case XLR_SGMII:
1282 NLGE_UPDATE(addr, R_MAC_CONFIG_1, 0,
1283 (1 << O_MAC_CONFIG_1__rxen));
1284 break;
1285 case XLR_XAUI:
1286 case XLR_XGMII:
1287 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1288 (1 << O_RX_CONTROL__RxEnable));
1289 break;
1290 default:
1291 printf("Error: Unsupported port_type=%d\n",
1292 sc->mac_type);
1293 }
1294
1295 /* 1.1 Wait for RxControl.RxHalt to be set */
1296 do {
1297 rx_ctrl = NLGE_READ(addr, R_RX_CONTROL);
1298 } while (!(rx_ctrl & 0x2));
1299
1300 /* 2. Set the soft reset bit in RxControl */
1301 NLGE_UPDATE(addr, R_RX_CONTROL, (1 << O_RX_CONTROL__SoftReset),
1302 (1 << O_RX_CONTROL__SoftReset));
1303
1304 /* 2.1 Wait for RxControl.SoftResetDone to be set */
1305 do {
1306 rx_ctrl = NLGE_READ(addr, R_RX_CONTROL);
1307 } while (!(rx_ctrl & 0x8));
1308
1309 /* 3. Clear the soft reset bit in RxControl */
1310 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1311 (1 << O_RX_CONTROL__SoftReset));
1312
1313 /* Turn off tx/rx on the port. */
1314 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1315 (1 << O_RX_CONTROL__RxEnable));
1316 NLGE_UPDATE(addr, R_TX_CONTROL, 0,
1317 (1 << O_TX_CONTROL__TxEnable));
1318 }
1319}
1320
1321static void
1322nlna_disable_ports(struct nlna_softc *sc)
1323{
1324 struct xlr_gmac_block_t *blk;
1325 xlr_reg_t *addr;
1326 int i;
1327
1328 blk = device_get_ivars(sc->nlna_dev);
1329 for (i = 0; i < sc->num_ports; i++) {
1330 uint32_t base = (uint32_t)DEFAULT_XLR_IO_BASE;
1331
1332 base += blk->gmac_port[i].base_addr;
1333 addr = (xlr_reg_t *) base;
1334 nlge_port_disable(i, addr, blk->gmac_port[i].type);
1335 }
1336}
1337
1338static void
1339nlna_enable_ports(struct nlna_softc *sc)
1340{
1341 device_t nlge_dev, *devlist;
1342 struct nlge_softc *port_sc;
1343 int i, numdevs;
1344
1345 device_get_children(sc->nlna_dev, &devlist, &numdevs);
1346 for (i = 0; i < numdevs; i++) {
1347 nlge_dev = devlist[i];
1348 if (nlge_dev == NULL)
1349 continue;
1350 port_sc = device_get_softc(nlge_dev);
1351 if (port_sc->nlge_if->if_drv_flags & IFF_DRV_RUNNING)
1352 nlge_port_enable(port_sc);
1353 }
1354 free(devlist, M_TEMP);
1355}
1356
1357static void
1358nlna_get_all_softc(device_t iodi_dev, struct nlna_softc **sc_vec,
1359 uint32_t vec_sz)
1360{
1361 device_t na_dev;
1362 int i;
1363
1364 for (i = 0; i < vec_sz; i++) {
1365 sc_vec[i] = NULL;
1366 na_dev = device_find_child(iodi_dev, "nlna", i);
1367 if (na_dev != NULL)
1368 sc_vec[i] = device_get_softc(na_dev);
1369 }
1370}
1371
1372static void
1373nlge_port_disable(int id, xlr_reg_t *base, int port_type)
1374{
1375 uint32_t rd;
1376
1377 NLGE_UPDATE(base, R_RX_CONTROL, 0x0, 1 << O_RX_CONTROL__RxEnable);
1378 do {
1379 rd = NLGE_READ(base, R_RX_CONTROL);
1380 } while (!(rd & (1 << O_RX_CONTROL__RxHalt)));
1381
1382 NLGE_UPDATE(base, R_TX_CONTROL, 0, 1 << O_TX_CONTROL__TxEnable);
1383 do {
1384 rd = NLGE_READ(base, R_TX_CONTROL);
1385 } while (!(rd & (1 << O_TX_CONTROL__TxIdle)));
1386
1387 switch (port_type) {
1388 case XLR_RGMII:
1389 case XLR_SGMII:
1390 NLGE_UPDATE(base, R_MAC_CONFIG_1, 0,
1391 ((1 << O_MAC_CONFIG_1__rxen) |
1392 (1 << O_MAC_CONFIG_1__txen)));
1393 break;
1394 case XLR_XGMII:
1395 case XLR_XAUI:
1396 NLGE_UPDATE(base, R_XGMAC_CONFIG_1, 0,
1397 ((1 << O_XGMAC_CONFIG_1__hsttfen) |
1398 (1 << O_XGMAC_CONFIG_1__hstrfen)));
1399 break;
1400 default:
1401 panic("Unknown MAC type on port %d\n", id);
1402 }
1403}
1404
1405static void
1406nlge_port_enable(struct nlge_softc *sc)
1407{
1408 struct xlr_gmac_port *self;
1409 xlr_reg_t *base;
1410
1411 base = sc->base;
1412 self = device_get_ivars(sc->nlge_dev);
1413 if (xlr_board_info.is_xls && sc->port_type == XLR_RGMII)
1414 NLGE_UPDATE(base, R_RX_CONTROL, (1 << O_RX_CONTROL__RGMII),
1415 (1 << O_RX_CONTROL__RGMII));
1416
1417 NLGE_UPDATE(base, R_RX_CONTROL, (1 << O_RX_CONTROL__RxEnable),
1418 (1 << O_RX_CONTROL__RxEnable));
1419 NLGE_UPDATE(base, R_TX_CONTROL,
1420 (1 << O_TX_CONTROL__TxEnable | RGE_TX_THRESHOLD_BYTES),
1421 (1 << O_TX_CONTROL__TxEnable | 0x3fff));
1422 switch (sc->port_type) {
1423 case XLR_RGMII:
1424 case XLR_SGMII:
1425 NLGE_UPDATE(base, R_MAC_CONFIG_1,
1426 ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen)),
1427 ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen)));
1428 break;
1429 case XLR_XGMII:
1430 case XLR_XAUI:
1431 NLGE_UPDATE(base, R_XGMAC_CONFIG_1,
1432 ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen)),
1433 ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen)));
1434 break;
1435 default:
1436 panic("Unknown MAC type on port %d\n", sc->id);
1437 }
1438}
1439
1440static void
1441nlge_sgmii_init(struct nlge_softc *sc)
1442{
1443 xlr_reg_t *mmio_gpio;
1444 int i;
1445 int phy;
1446
1447 if (sc->port_type != XLR_SGMII)
1448 return;
1449
1450 nlge_mii_write_internal(sc->serdes_addr, 26, 0, 0x6DB0);
1451 nlge_mii_write_internal(sc->serdes_addr, 26, 1, 0xFFFF);
1452 nlge_mii_write_internal(sc->serdes_addr, 26, 2, 0xB6D0);
1453 nlge_mii_write_internal(sc->serdes_addr, 26, 3, 0x00FF);
1454 nlge_mii_write_internal(sc->serdes_addr, 26, 4, 0x0000);
1455 nlge_mii_write_internal(sc->serdes_addr, 26, 5, 0x0000);
1456 nlge_mii_write_internal(sc->serdes_addr, 26, 6, 0x0005);
1457 nlge_mii_write_internal(sc->serdes_addr, 26, 7, 0x0001);
1458 nlge_mii_write_internal(sc->serdes_addr, 26, 8, 0x0000);
1459 nlge_mii_write_internal(sc->serdes_addr, 26, 9, 0x0000);
1460 nlge_mii_write_internal(sc->serdes_addr, 26,10, 0x0000);
1461
1462 for(i=0;i<10000000;i++){} /* delay */
1463 /* program GPIO values for serdes init parameters */
1464 mmio_gpio = (xlr_reg_t *) (DEFAULT_XLR_IO_BASE + XLR_IO_GPIO_OFFSET);
1465 mmio_gpio[0x20] = 0x7e6802;
1466 mmio_gpio[0x10] = 0x7104;
1467 for(i=0;i<100000000;i++){}
1468
1469 /* enable autoneg - more magic */
1470 phy = sc->phy_addr % 4 + 27;
1471 nlge_mii_write_internal(sc->pcs_addr, phy, 0, 0x1000);
1472 DELAY(100000);
1473 nlge_mii_write_internal(sc->pcs_addr, phy, 0, 0x0200);
1474 DELAY(100000);
1475}
1476
1477static void
1478nlge_intr(void *arg)
1479{
1480 struct nlge_port_set *pset;
1481 struct nlge_softc *sc;
1482 struct nlge_softc *port_sc;
1483 xlr_reg_t *base;
1484 uint32_t intreg;
1485 uint32_t intr_status;
1486 int i;
1487
1488 sc = arg;
1489 if (sc == NULL) {
1490 printf("warning: No port registered for interrupt\n");
1491 return;
1492 }
1493 base = sc->base;
1494
1495 intreg = NLGE_READ(base, R_INTREG);
1496 if (intreg & (1 << O_INTREG__MDInt)) {
1497 pset = sc->mdio_pset;
1498 if (pset == NULL) {
1499 printf("warning: No ports for MDIO interrupt\n");
1500 return;
1501 }
1502 for (i = 0; i < pset->vec_sz; i++) {
1503 port_sc = pset->port_vec[i];
1504
1505 if (port_sc == NULL)
1506 continue;
1507
1508 /* Ack phy interrupt - clear on read */
1509 intr_status = nlge_mii_read_internal(port_sc->mii_base,
1510 port_sc->phy_addr, 26);
1511 PDEBUG("Phy_%d: int_status=0x%08x\n", port_sc->phy_addr,
1512 intr_status);
1513
1514 if (!(intr_status & 0x8000)) {
1515 /* no interrupt for this port */
1516 continue;
1517 }
1518
1519 if (intr_status & 0x2410) {
1520 /* update link status for port */
1521 nlge_gmac_config_speed(port_sc, 0);
1522 } else {
1523 printf("%s: Unsupported phy interrupt"
1524 " (0x%08x)\n",
1525 device_get_nameunit(port_sc->nlge_dev),
1526 intr_status);
1527 }
1528 }
1529 }
1530
1531 /* Clear the NA interrupt */
1532 xlr_write_reg(base, R_INTREG, 0xffffffff);
1533
1534 return;
1535}
1536
1537static int
1538nlge_irq_init(struct nlge_softc *sc)
1539{
1540 struct resource irq_res;
1541 struct nlna_softc *na_sc;
1542 struct xlr_gmac_block_t *block_info;
1543 device_t na_dev;
1544 int ret;
1545 int irq_num;
1546
1547 na_dev = device_get_parent(sc->nlge_dev);
1548 block_info = device_get_ivars(na_dev);
1549
1550 irq_num = block_info->baseirq + sc->instance;
1551 irq_res.__r_i = (struct resource_i *)(intptr_t) (irq_num);
1552 ret = bus_setup_intr(sc->nlge_dev, &irq_res, (INTR_FAST |
1553 INTR_TYPE_NET | INTR_MPSAFE), NULL, nlge_intr, sc, NULL);
1554 if (ret) {
1555 nlge_detach(sc->nlge_dev);
1556 device_printf(sc->nlge_dev, "couldn't set up irq: error=%d\n",
1557 ret);
1558 return (ENXIO);
1559 }
1560 PDEBUG("Setup intr for dev=%s, irq=%d\n",
1561 device_get_nameunit(sc->nlge_dev), irq_num);
1562
1563 if (sc->instance == 0) {
1564 na_sc = device_get_softc(na_dev);
1565 sc->mdio_pset = &na_sc->mdio_set;
1566 }
1567 return (0);
1568}
1569
1570static void
1571nlge_irq_fini(struct nlge_softc *sc)
1572{
1573}
1574
1575static void
1576nlge_hw_init(struct nlge_softc *sc)
1577{
1578 struct xlr_gmac_port *port_info;
1579 xlr_reg_t *base;
1580
1581 base = sc->base;
1582 port_info = device_get_ivars(sc->nlge_dev);
1583 sc->tx_bucket_id = port_info->tx_bucket_id;
1584
1585 /* each packet buffer is 1536 bytes */
1586 NLGE_WRITE(base, R_DESC_PACK_CTRL,
1587 (1 << O_DESC_PACK_CTRL__MaxEntry) |
1588 (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));
1589 NLGE_WRITE(base, R_STATCTRL, ((1 << O_STATCTRL__Sten) |
1590 (1 << O_STATCTRL__ClrCnt)));
1591 NLGE_WRITE(base, R_L2ALLOCCTRL, 0xffffffff);
1592 NLGE_WRITE(base, R_INTMASK, 0);
1593 nlge_set_mac_addr(sc);
1594 nlge_media_specific_init(sc);
1595}
1596
1597static void
1598nlge_sc_init(struct nlge_softc *sc, device_t dev,
1599 struct xlr_gmac_port *port_info)
1600{
1601 memset(sc, 0, sizeof(*sc));
1602 sc->nlge_dev = dev;
1603 sc->id = device_get_unit(dev);
1604 nlge_set_port_attribs(sc, port_info);
1605}
1606
1607static void
1608nlge_media_specific_init(struct nlge_softc *sc)
1609{
1610 struct mii_data *media;
1611 struct bucket_size *bucket_sizes;
1612
1613 bucket_sizes = xlr_board_info.bucket_sizes;
1614 switch (sc->port_type) {
1615 case XLR_RGMII:
1616 case XLR_SGMII:
1617 case XLR_XAUI:
1618 NLGE_UPDATE(sc->base, R_DESC_PACK_CTRL,
1619 (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset),
1620 (W_DESC_PACK_CTRL__ByteOffset <<
1621 O_DESC_PACK_CTRL__ByteOffset));
1622 NLGE_WRITE(sc->base, R_GMAC_TX0_BUCKET_SIZE + sc->instance,
1623 bucket_sizes->bucket[sc->tx_bucket_id]);
1624 if (sc->port_type != XLR_XAUI) {
1625 nlge_gmac_config_speed(sc, 1);
1626 if (sc->mii_bus) {
1627 media = (struct mii_data *)device_get_softc(
1628 sc->mii_bus);
1629 }
1630 }
1631 break;
1632
1633 case XLR_XGMII:
1634 NLGE_WRITE(sc->base, R_BYTEOFFSET0, 0x2);
1635 NLGE_WRITE(sc->base, R_XGMACPADCALIBRATION, 0x30);
1636 NLGE_WRITE(sc->base, R_XGS_TX0_BUCKET_SIZE,
1637 bucket_sizes->bucket[sc->tx_bucket_id]);
1638 break;
1639 default:
1640 break;
1641 }
1642}
1643
1644/*
1645 * Read the MAC address from the XLR boot registers. All port addresses
1646 * are identical except for the lowest octet.
1647 */
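/*
 * Example with a hypothetical boot MAC of 00:11:22:33:44:55: the loop
 * below copies bits 47..0 of xlr_boot1_info.mac_addr into dev_addr[0..5],
 * and port 2 would then advertise 00:11:22:33:44:57.
 */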
1648static void
1649nlge_read_mac_addr(struct nlge_softc *sc)
1650{
1651 int i, j;
1652
1653 for (i = 0, j = 40; i < ETHER_ADDR_LEN && j >= 0; i++, j-= 8)
1654 sc->dev_addr[i] = (xlr_boot1_info.mac_addr >> j) & 0xff;
1655
1656 sc->dev_addr[i - 1] += sc->id; /* last octet is port-specific */
1657}
1658
1659/*
1660 * Write the MAC address to the XLR MAC port. Also, set the address
1661 * masks and MAC filter configuration.
1662 */
1663static void
1664nlge_set_mac_addr(struct nlge_softc *sc)
1665{
1666 NLGE_WRITE(sc->base, R_MAC_ADDR0,
1667 ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16) |
1668 (sc->dev_addr[3] << 8) | (sc->dev_addr[2])));
1669 NLGE_WRITE(sc->base, R_MAC_ADDR0 + 1,
1670 ((sc->dev_addr[1] << 24) | (sc-> dev_addr[0] << 16)));
1671
1672 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK2, 0xffffffff);
1673 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
1674 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK3, 0xffffffff);
1675 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
1676
1677 NLGE_WRITE(sc->base, R_MAC_FILTER_CONFIG,
1678 (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1679 (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1680 (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));
1681
1682 if (sc->port_type == XLR_RGMII || sc->port_type == XLR_SGMII) {
1683 NLGE_UPDATE(sc->base, R_IPG_IFG, MAC_B2B_IPG, 0x7f);
1684 }
1685}
1686
1687static int
1688nlge_if_init(struct nlge_softc *sc)
1689{
1690 struct ifnet *ifp;
1691 device_t dev;
1692 int error;
1693
1694 error = 0;
1695 dev = sc->nlge_dev;
1696 NLGE_LOCK_INIT(sc, device_get_nameunit(dev));
1697
1698 ifp = sc->nlge_if = if_alloc(IFT_ETHER);
1699 if (ifp == NULL) {
1700 device_printf(dev, "can not if_alloc()\n");
1701 error = ENOSPC;
1702 goto fail;
1703 }
1704 ifp->if_softc = sc;
1705 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1706 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1707 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;
1708 ifp->if_capenable = ifp->if_capabilities;
1709 ifp->if_ioctl = nlge_ioctl;
1710 ifp->if_start = nlge_start;
1711 ifp->if_init = nlge_init;
1712 ifp->if_hwassist = 0;
1713 ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1714 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1715 IFQ_SET_READY(&ifp->if_snd);
1716
1717 ifmedia_init(&sc->nlge_mii.mii_media, 0, nlge_mediachange,
1718 nlge_mediastatus);
1719 ifmedia_add(&sc->nlge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1720 ifmedia_set(&sc->nlge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1721 sc->nlge_mii.mii_media.ifm_media = sc->nlge_mii.mii_media.ifm_cur->ifm_media;
1722 nlge_read_mac_addr(sc);
1723
1724 ether_ifattach(ifp, sc->dev_addr);
1725
1726fail:
1727 return (error);
1728}
1729
1730static void
1731nlge_mii_init(device_t dev, struct nlge_softc *sc)
1732{
1733 int error;
1734
1735 if (sc->port_type != XLR_XAUI && sc->port_type != XLR_XGMII) {
1736 NLGE_WRITE(sc->mii_base, R_MII_MGMT_CONFIG, 0x07);
1737 }
1738 error = mii_phy_probe(dev, &sc->mii_bus, nlge_mediachange, nlge_mediastatus);
1739 if (error) {
1740 device_printf(dev, "no PHY device found\n");
1741 sc->mii_bus = NULL;
1742 }
1743 if (sc->mii_bus != NULL) {
1744 /*
1745 * Enable all MDIO interrupts in the phy. RX_ER bit seems to get
1746 * set about every 1 sec in GigE mode, ignore it for now...
1747 */
1748 nlge_mii_write_internal(sc->mii_base, sc->phy_addr, 25,
1749 0xfffffffe);
1750 }
1751}
1752
1753/*
1754 * Read a PHY register.
1755 *
1756 * Input parameters:
1757 * mii_base - Base address of MII
1758 * phyaddr - PHY's address
1759 * regidx - index of register to read
1760 *
1761 * Return value:
1762 * value read, or 0 if an error occurred.
1763 */
1764
1765static int
1766nlge_mii_read_internal(xlr_reg_t *mii_base, int phyaddr, int regidx)
1767{
1768 int i, val;
1769
1770 /* setup the phy reg to be used */
1771 NLGE_WRITE(mii_base, R_MII_MGMT_ADDRESS,
1772 (phyaddr << 8) | (regidx << 0));
1773 /* Issue the read command */
1774 NLGE_WRITE(mii_base, R_MII_MGMT_COMMAND,
1775 (1 << O_MII_MGMT_COMMAND__rstat));
1776
1777 /* poll for the read cycle to complete */
1778 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
1779 if (NLGE_READ(mii_base, R_MII_MGMT_INDICATORS) == 0)
1780 break;
1781 }
1782
1783 /* clear the read cycle */
1784 NLGE_WRITE(mii_base, R_MII_MGMT_COMMAND, 0);
1785
1786 if (i == PHY_STATUS_RETRIES) {
1787 return (0xffffffff);
1788 }
1789
1790 val = NLGE_READ(mii_base, R_MII_MGMT_STATUS);
1791
1792 return (val);
1793}
1794
1795/*
1796 * Write a value to a PHY register.
1797 *
1798 * Input parameters:
1799 * mii_base - Base address of MII
1800 * phyaddr - PHY to use
1801 * regidx - register within the PHY
1802 * regval - data to write to register
1803 *
1804 * Return value:
1805 * nothing
1806 */
1807static void
1808nlge_mii_write_internal(xlr_reg_t *mii_base, int phyaddr, int regidx,
1809 int regval)
1810{
1811 int i;
1812
1813 NLGE_WRITE(mii_base, R_MII_MGMT_ADDRESS,
1814 (phyaddr << 8) | (regidx << 0));
1815
1816 /* Write the data which starts the write cycle */
1817 NLGE_WRITE(mii_base, R_MII_MGMT_WRITE_DATA, regval);
1818
1819 /* poll for the write cycle to complete */
1820 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
1821 if (NLGE_READ(mii_base, R_MII_MGMT_INDICATORS) == 0)
1822 break;
1823 }
1824}
1825
1826/*
1827 * Function to optimize the use of p2d descriptors for the given PDU.
1828 * As it is on the fast path (called during packet transmission), it is
1829 * described in more detail than the initialization functions.
1830 *
1831 * Input: mbuf chain (MC), pointer to fmn message
1832 * Input constraints: None
1833 * Output: FMN message to transmit the data in MC
1834 * Return values: 0 - success
1835 * 1 - MC cannot be handled (see Limitations below)
1836 * 2 - MC cannot be handled presently (maybe worth re-trying)
1837 * Other output: Number of entries filled in the FMN message
1838 *
1839 * Output structure/constraints:
1840 * 1. Max 3 p2d's + 1 zero-len (ZL) p2d with virtual address of MC.
1841 * 2. 3 p2d's + 1 p2p with max 14 p2d's (ZL p2d not required in this case).
1842 * 3. Each p2d points to physically contiguous chunk of data (subject to
1843 * entire MC requiring max 17 p2d's).
1844 * Limitations:
1845 * 1. MC's that require more than 17 p2d's are not handled.
1846 * Benefits: MC's that require <= 3 p2d's avoid the overhead of allocating
1847 * the p2p structure. Small packets (which typically give low
1848 * performance) are expected to have a small MC that takes
1849 * advantage of this.
1850 */
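/*
 * For reference, the 64-bit entries built below:
 *   data p2d:     (127 << 54) | (fragment length << 40) | physical addr,
 *                 with bit 63 (EOP) set on the last fragment.
 *   zero-len p2d: bit 63 | (free-back station id << 54) | mbuf chain ptr,
 *                 used to recover the mbuf on the tx free-back.
 *   p2p entry:    bit 62 | (free-back station id << 54) |
 *                 (p2d list size in bytes << 40) | paddr of the
 *                 nlge_tx_desc that holds the overflow p2d's.
 */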
1851static int
1852prepare_fmn_message(struct nlge_softc *sc, struct msgrng_msg *fmn_msg,
1853 uint32_t *n_entries, struct mbuf *mbuf_chain, uint64_t fb_stn_id,
1854 struct nlge_tx_desc **tx_desc)
1855{
1856 struct mbuf *m;
1857 struct nlge_tx_desc *p2p;
1858 uint64_t *cur_p2d;
1859 vm_offset_t buf;
1860 vm_paddr_t paddr;
1861 int msg_sz, p2p_sz, is_p2p;
1862 int len, frag_sz;
1863 /* Num entries per FMN msg is 4 for XLR/XLS */
1864 const int FMN_SZ = sizeof(*fmn_msg) / sizeof(uint64_t);
1865
1866 msg_sz = p2p_sz = is_p2p = 0;
1867 p2p = NULL;
1868 cur_p2d = &fmn_msg->msg0;
1869
1870 for (m = mbuf_chain; m != NULL; m = m->m_next) {
1871 buf = (vm_offset_t) m->m_data;
1872 len = m->m_len;
1873
1874 while (len) {
1875 if (msg_sz == (FMN_SZ - 1)) {
1876 p2p = uma_zalloc(nl_tx_desc_zone, M_NOWAIT);
1877 if (p2p == NULL) {
1878 return 2;
1879 }
1880 /*
1881 * Save the virtual address in the descriptor,
1882 * it makes freeing easy.
1883 */
1884 p2p->frag[XLR_MAX_TX_FRAGS] =
1885 (uint64_t)(vm_offset_t)p2p;
1886 cur_p2d = &p2p->frag[0];
1887 is_p2p = 1;
1888 } else if (msg_sz == (FMN_SZ - 2 + XLR_MAX_TX_FRAGS)) {
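				/*
				 * The chain needs more p2d entries than the
				 * message plus one p2p descriptor can carry;
				 * give up (limitation 1 above).
				 */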
1889 uma_zfree(nl_tx_desc_zone, p2p);
1890 return 1;
1891 }
1892 paddr = vtophys(buf);
1893 frag_sz = PAGE_SIZE - (buf & PAGE_MASK);
1894 if (len < frag_sz)
1895 frag_sz = len;
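			/*
			 * Fill in one p2d entry: physical address in the low
			 * bits, fragment length in bits 40-53 and a control
			 * field (0x7f here) above that.  The EOP bit (bit 63)
			 * is set later on the final entry.
			 */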
1896 *cur_p2d++ = (127ULL << 54) | ((uint64_t)frag_sz << 40)
1897 | paddr;
1898 msg_sz++;
1899 if (is_p2p)
1900 p2p_sz++;
1901 len -= frag_sz;
1902 buf += frag_sz;
1903 }
1904 }
1905
1906 if (msg_sz == 0) {
1907 printf("Zero-length mbuf chain ??\n");
1908		*n_entries = msg_sz;
1909 return 0;
1910 }
1911
1912 cur_p2d[-1] |= (1ULL << 63); /* set eop in most-recent p2d */
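	/*
	 * Append a zero-length entry carrying the free-back station id and
	 * the virtual address of the mbuf chain; the free-back message seen
	 * after transmission uses it to free the chain.
	 */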
1913 *cur_p2d = (1ULL << 63) | ((uint64_t)fb_stn_id << 54) |
1914 (vm_offset_t) mbuf_chain; /* XXX: fix 64 bit */
1915 *tx_desc = p2p;
1916
1917 if (is_p2p) {
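		/*
		 * msg3 describes the external frag[] array: its physical
		 * address, its length in bytes (p2p_sz entries of 8 bytes
		 * each) and the free-back station id.
		 */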
1918 paddr = vtophys(p2p);
1919 p2p_sz++;
1920 fmn_msg->msg3 = (1ULL << 62) | ((uint64_t)fb_stn_id << 54) |
1921 ((uint64_t)(p2p_sz * 8) << 40) | paddr;
1922 *n_entries = FMN_SZ;
1923 } else {
1924 *n_entries = msg_sz + 1;
1925 }
1926
1927 return (0);
1928}
1929
1930static int
1931send_fmn_msg_tx(struct nlge_softc *sc, struct msgrng_msg *msg,
1932 uint32_t n_entries)
1933{
1934 uint32_t msgrng_flags;
1935 int ret;
1936#ifdef INVARIANTS
1937 int i = 0;
1938#endif
1939
1940 do {
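		/*
		 * message_send() fails when the transmit bucket is out of
		 * credits; keep retrying until a credit frees up.  The
		 * KASSERT below catches a bucket that never drains.
		 */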
1941 msgrng_flags = msgrng_access_enable();
1942 ret = message_send(n_entries, MSGRNG_CODE_MAC,
1943 sc->tx_bucket_id, msg);
1944 msgrng_restore(msgrng_flags);
1945 KASSERT(i++ < 100000, ("Too many credit fails\n"));
1946 } while (ret != 0);
1947 return (0);
1948}
1949
1950static void
1951release_tx_desc(vm_paddr_t paddr)
1952{
1953 struct nlge_tx_desc *tx_desc;
1954 uint32_t sr;
1955 uint64_t vaddr;
1956
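	/*
	 * prepare_fmn_message() stashed the descriptor's own virtual address
	 * in frag[XLR_MAX_TX_FRAGS]; load it back (with KX enabled so the
	 * physical address can be accessed) and return the descriptor to its
	 * zone.
	 */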
1957 paddr += (XLR_MAX_TX_FRAGS * sizeof(uint64_t));
1958 sr = xlr_enable_kx();
1959 vaddr = xlr_paddr_ld(paddr);
1960 xlr_restore_kx(sr);
1961
1962 tx_desc = (struct nlge_tx_desc*)(intptr_t)vaddr;
1963 uma_zfree(nl_tx_desc_zone, tx_desc);
1964}
1965
1966static void *
1967get_buf(void)
1968{
1969 struct mbuf *m_new;
1970 uint64_t *md;
1971#ifdef INVARIANTS
1972 vm_paddr_t temp1, temp2;
1973#endif
1974
1975 if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
1976 return (NULL);
1977 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1978	m_adj(m_new, XLR_CACHELINE_SIZE - ((uintptr_t)m_new->m_data & 0x1f));
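	/*
	 * Stash a back-pointer to the mbuf (plus a marker word) at the head
	 * of the cluster, then advance the data pointer by one cache line so
	 * the received frame is written past this metadata.
	 */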
1979 md = (uint64_t *)m_new->m_data;
1980 md[0] = (intptr_t)m_new; /* Back Ptr */
1981 md[1] = 0xf00bad;
1982 m_adj(m_new, XLR_CACHELINE_SIZE);
1983
1984#ifdef INVARIANTS
1985 temp1 = vtophys((vm_offset_t) m_new->m_data);
1986 temp2 = vtophys((vm_offset_t) m_new->m_data + 1536);
1987 if ((temp1 + 1536) != temp2)
1988 panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
1989#endif
1990
1991 return ((void *)m_new->m_data);
1992}
1993
1994static int
1995nlge_gmac_config_speed(struct nlge_softc *sc, int quick)
1996{
1997 struct mii_data *md;
1998 xlr_reg_t *mmio;
1999 int bmsr, n_tries, max_tries;
2000 int core_ctl[] = { 0x2, 0x1, 0x0, 0x1 };
2001 int sgmii_speed[] = { SGMII_SPEED_10,
2002 SGMII_SPEED_100,
2003 SGMII_SPEED_1000,
2004 SGMII_SPEED_100 }; /* default to 100Mbps */
2005 char *speed_str[] = { "10",
2006 "100",
2007 "1000",
2008 "unknown, defaulting to 100" };
2009 int link_state = LINK_STATE_DOWN;
2010
2011 if (sc->port_type == XLR_XAUI || sc->port_type == XLR_XGMII)
2012 return 0;
2013
2014 md = NULL;
2015 mmio = sc->base;
2016 if (sc->mii_base != NULL) {
2017 max_tries = (quick == 1) ? 100 : 4000;
2018 bmsr = 0;
2019 for (n_tries = 0; n_tries < max_tries; n_tries++) {
2020 bmsr = nlge_mii_read_internal(sc->mii_base,
2021 sc->phy_addr, MII_BMSR);
2022 if ((bmsr & BMSR_ACOMP) && (bmsr & BMSR_LINK))
2023 break; /* Auto-negotiation is complete
2024 and link is up */
2025 DELAY(1000);
2026 }
2027 bmsr &= BMSR_LINK;
2028 sc->link = (bmsr == 0) ? xlr_mac_link_down : xlr_mac_link_up;
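		/*
		 * Bits 4:3 of PHY register 28 give the negotiated speed
		 * (0 = 10, 1 = 100, 2 = 1000); the value indexes the
		 * core_ctl/sgmii_speed/speed_str tables above.
		 */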
2029 sc->speed = nlge_mii_read_internal(sc->mii_base, sc->phy_addr, 28);
2030 sc->speed = (sc->speed >> 3) & 0x03;
2031 if (sc->link == xlr_mac_link_up) {
2032 link_state = LINK_STATE_UP;
2033 nlge_sgmii_init(sc);
2034 }
2035 if (sc->mii_bus)
2036 md = (struct mii_data *)device_get_softc(sc->mii_bus);
2037 }
2038
2039 if (sc->port_type != XLR_RGMII)
2040 NLGE_WRITE(mmio, R_INTERFACE_CONTROL, sgmii_speed[sc->speed]);
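	/*
	 * R_MAC_CONFIG_2: 0x7117 and 0x7217 differ only in bits 8-9; the
	 * former is written for 10/100 operation and the latter for 1000Mbps.
	 */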
2041 if (sc->speed == xlr_mac_speed_10 || sc->speed == xlr_mac_speed_100 ||
2042 sc->speed == xlr_mac_speed_rsvd) {
2043 NLGE_WRITE(mmio, R_MAC_CONFIG_2, 0x7117);
2044 } else if (sc->speed == xlr_mac_speed_1000) {
2045 NLGE_WRITE(mmio, R_MAC_CONFIG_2, 0x7217);
2046 if (md != NULL) {
2047 ifmedia_set(&md->mii_media, IFM_MAKEWORD(IFM_ETHER,
2048 IFM_1000_T, IFM_FDX, md->mii_instance));
2049 }
2050 }
2051 NLGE_WRITE(mmio, R_CORECONTROL, core_ctl[sc->speed]);
2052 if_link_state_change(sc->nlge_if, link_state);
2053 printf("%s: [%sMbps]\n", device_get_nameunit(sc->nlge_dev),
2054 speed_str[sc->speed]);
2055
2056 return (0);
2057}
2058
2059/*
2060 * This function is called for each port that was added to the device tree
2061 * and it initializes the following port attributes:
2062 * - type
2063 * - base (base address to access port-specific registers)
2064 * - mii_base
2065 * - phy_addr
2066 */
2067static void
2068nlge_set_port_attribs(struct nlge_softc *sc,
2069 struct xlr_gmac_port *port_info)
2070{
2071 sc->instance = port_info->instance % 4; /* TBD: will not work for SPI-4 */
2072 sc->port_type = port_info->type;
2073 sc->base = (xlr_reg_t *) (port_info->base_addr +
2074 (uint32_t)DEFAULT_XLR_IO_BASE);
2075 sc->mii_base = (xlr_reg_t *) (port_info->mii_addr +
2076 (uint32_t)DEFAULT_XLR_IO_BASE);
2077 if (port_info->pcs_addr != 0)
2078 sc->pcs_addr = (xlr_reg_t *) (port_info->pcs_addr +
2079 (uint32_t)DEFAULT_XLR_IO_BASE);
2080 if (port_info->serdes_addr != 0)
2081 sc->serdes_addr = (xlr_reg_t *) (port_info->serdes_addr +
2082 (uint32_t)DEFAULT_XLR_IO_BASE);
2083 sc->phy_addr = port_info->phy_addr;
2084
2085 PDEBUG("Port%d: base=%p, mii_base=%p, phy_addr=%d\n", sc->id, sc->base,
2086 sc->mii_base, sc->phy_addr);
2087}
2088
2089/* ------------------------------------------------------------------------ */
2090
2091/* Debug dump functions */
2092
2093#ifdef DEBUG
2094
2095static void
2096dump_reg(xlr_reg_t *base, uint32_t offset, char *name)
2097{
2098 int val;
2099
2100 val = NLGE_READ(base, offset);
2101 printf("%-30s: 0x%8x 0x%8x\n", name, offset, val);
2102}
2103
2104#define STRINGIFY(x) #x
2105
2106static void
2107dump_na_registers(xlr_reg_t *base_addr, int port_id)
2108{
2109 PDEBUG("Register dump for NA (of port=%d)\n", port_id);
2110 dump_reg(base_addr, R_PARSERCONFIGREG, STRINGIFY(R_PARSERCONFIGREG));
2111 PDEBUG("Tx bucket sizes\n");
2112 dump_reg(base_addr, R_GMAC_JFR0_BUCKET_SIZE,
2113 STRINGIFY(R_GMAC_JFR0_BUCKET_SIZE));
2114 dump_reg(base_addr, R_GMAC_RFR0_BUCKET_SIZE,
2115 STRINGIFY(R_GMAC_RFR0_BUCKET_SIZE));
2116 dump_reg(base_addr, R_GMAC_TX0_BUCKET_SIZE,
2117 STRINGIFY(R_GMAC_TX0_BUCKET_SIZE));
2118 dump_reg(base_addr, R_GMAC_TX1_BUCKET_SIZE,
2119 STRINGIFY(R_GMAC_TX1_BUCKET_SIZE));
2120 dump_reg(base_addr, R_GMAC_TX2_BUCKET_SIZE,
2121 STRINGIFY(R_GMAC_TX2_BUCKET_SIZE));
2122 dump_reg(base_addr, R_GMAC_TX3_BUCKET_SIZE,
2123 STRINGIFY(R_GMAC_TX3_BUCKET_SIZE));
2124 dump_reg(base_addr, R_GMAC_JFR1_BUCKET_SIZE,
2125 STRINGIFY(R_GMAC_JFR1_BUCKET_SIZE));
2126 dump_reg(base_addr, R_GMAC_RFR1_BUCKET_SIZE,
2127 STRINGIFY(R_GMAC_RFR1_BUCKET_SIZE));
2128 dump_reg(base_addr, R_TXDATAFIFO0, STRINGIFY(R_TXDATAFIFO0));
2129 dump_reg(base_addr, R_TXDATAFIFO1, STRINGIFY(R_TXDATAFIFO1));
2130}
2131
2132static void
2133dump_gmac_registers(struct nlge_softc *sc)
2134{
2135 xlr_reg_t *base_addr = sc->base;
2136 int port_id = sc->instance;
2137
2138 PDEBUG("Register dump for port=%d\n", port_id);
2139 if (sc->port_type == XLR_RGMII || sc->port_type == XLR_SGMII) {
2140 dump_reg(base_addr, R_MAC_CONFIG_1, STRINGIFY(R_MAC_CONFIG_1));
2141 dump_reg(base_addr, R_MAC_CONFIG_2, STRINGIFY(R_MAC_CONFIG_2));
2142 dump_reg(base_addr, R_IPG_IFG, STRINGIFY(R_IPG_IFG));
2143 dump_reg(base_addr, R_HALF_DUPLEX, STRINGIFY(R_HALF_DUPLEX));
2144 dump_reg(base_addr, R_MAXIMUM_FRAME_LENGTH,
2145 STRINGIFY(R_MAXIMUM_FRAME_LENGTH));
2146 dump_reg(base_addr, R_TEST, STRINGIFY(R_TEST));
2147 dump_reg(base_addr, R_MII_MGMT_CONFIG,
2148 STRINGIFY(R_MII_MGMT_CONFIG));
2149 dump_reg(base_addr, R_MII_MGMT_COMMAND,
2150 STRINGIFY(R_MII_MGMT_COMMAND));
2151 dump_reg(base_addr, R_MII_MGMT_ADDRESS,
2152 STRINGIFY(R_MII_MGMT_ADDRESS));
2153 dump_reg(base_addr, R_MII_MGMT_WRITE_DATA,
2154 STRINGIFY(R_MII_MGMT_WRITE_DATA));
2155 dump_reg(base_addr, R_MII_MGMT_STATUS,
2156 STRINGIFY(R_MII_MGMT_STATUS));
2157 dump_reg(base_addr, R_MII_MGMT_INDICATORS,
2158 STRINGIFY(R_MII_MGMT_INDICATORS));
2159 dump_reg(base_addr, R_INTERFACE_CONTROL,
2160 STRINGIFY(R_INTERFACE_CONTROL));
2161 dump_reg(base_addr, R_INTERFACE_STATUS,
2162 STRINGIFY(R_INTERFACE_STATUS));
2163 } else if (sc->port_type == XLR_XAUI || sc->port_type == XLR_XGMII) {
2164 dump_reg(base_addr, R_XGMAC_CONFIG_0,
2165 STRINGIFY(R_XGMAC_CONFIG_0));
2166 dump_reg(base_addr, R_XGMAC_CONFIG_1,
2167 STRINGIFY(R_XGMAC_CONFIG_1));
2168 dump_reg(base_addr, R_XGMAC_CONFIG_2,
2169 STRINGIFY(R_XGMAC_CONFIG_2));
2170 dump_reg(base_addr, R_XGMAC_CONFIG_3,
2171 STRINGIFY(R_XGMAC_CONFIG_3));
2172 dump_reg(base_addr, R_XGMAC_STATION_ADDRESS_LS,
2173 STRINGIFY(R_XGMAC_STATION_ADDRESS_LS));
2174 dump_reg(base_addr, R_XGMAC_STATION_ADDRESS_MS,
2175 STRINGIFY(R_XGMAC_STATION_ADDRESS_MS));
2176 dump_reg(base_addr, R_XGMAC_MAX_FRAME_LEN,
2177 STRINGIFY(R_XGMAC_MAX_FRAME_LEN));
2178 dump_reg(base_addr, R_XGMAC_REV_LEVEL,
2179 STRINGIFY(R_XGMAC_REV_LEVEL));
2180 dump_reg(base_addr, R_XGMAC_MIIM_COMMAND,
2181 STRINGIFY(R_XGMAC_MIIM_COMMAND));
2182 dump_reg(base_addr, R_XGMAC_MIIM_FILED,
2183 STRINGIFY(R_XGMAC_MIIM_FILED));
2184 dump_reg(base_addr, R_XGMAC_MIIM_CONFIG,
2185 STRINGIFY(R_XGMAC_MIIM_CONFIG));
2186 dump_reg(base_addr, R_XGMAC_MIIM_LINK_FAIL_VECTOR,
2187 STRINGIFY(R_XGMAC_MIIM_LINK_FAIL_VECTOR));
2188 dump_reg(base_addr, R_XGMAC_MIIM_INDICATOR,
2189 STRINGIFY(R_XGMAC_MIIM_INDICATOR));
2190 }
2191
2192 dump_reg(base_addr, R_MAC_ADDR0, STRINGIFY(R_MAC_ADDR0));
2193 dump_reg(base_addr, R_MAC_ADDR0 + 1, STRINGIFY(R_MAC_ADDR0+1));
2194 dump_reg(base_addr, R_MAC_ADDR1, STRINGIFY(R_MAC_ADDR1));
2195 dump_reg(base_addr, R_MAC_ADDR2, STRINGIFY(R_MAC_ADDR2));
2196 dump_reg(base_addr, R_MAC_ADDR3, STRINGIFY(R_MAC_ADDR3));
2197 dump_reg(base_addr, R_MAC_ADDR_MASK2, STRINGIFY(R_MAC_ADDR_MASK2));
2198 dump_reg(base_addr, R_MAC_ADDR_MASK3, STRINGIFY(R_MAC_ADDR_MASK3));
2199 dump_reg(base_addr, R_MAC_FILTER_CONFIG, STRINGIFY(R_MAC_FILTER_CONFIG));
2200 dump_reg(base_addr, R_TX_CONTROL, STRINGIFY(R_TX_CONTROL));
2201 dump_reg(base_addr, R_RX_CONTROL, STRINGIFY(R_RX_CONTROL));
2202 dump_reg(base_addr, R_DESC_PACK_CTRL, STRINGIFY(R_DESC_PACK_CTRL));
2203 dump_reg(base_addr, R_STATCTRL, STRINGIFY(R_STATCTRL));
2204 dump_reg(base_addr, R_L2ALLOCCTRL, STRINGIFY(R_L2ALLOCCTRL));
2205 dump_reg(base_addr, R_INTMASK, STRINGIFY(R_INTMASK));
2206 dump_reg(base_addr, R_INTREG, STRINGIFY(R_INTREG));
2207 dump_reg(base_addr, R_TXRETRY, STRINGIFY(R_TXRETRY));
2208 dump_reg(base_addr, R_CORECONTROL, STRINGIFY(R_CORECONTROL));
2209 dump_reg(base_addr, R_BYTEOFFSET0, STRINGIFY(R_BYTEOFFSET0));
2210 dump_reg(base_addr, R_BYTEOFFSET1, STRINGIFY(R_BYTEOFFSET1));
2211 dump_reg(base_addr, R_L2TYPE_0, STRINGIFY(R_L2TYPE_0));
2212 dump_na_registers(base_addr, port_id);
2213}
2214
2215static void
2216dump_fmn_cpu_credits_for_gmac(struct xlr_board_info *board, int gmac_id)
2217{
2218 struct stn_cc *cc;
2219 int gmac_bucket_ids[] = { 97, 98, 99, 100, 101, 103 };
2220 int j, k, r, c;
2221 int n_gmac_buckets;
2222
2223 n_gmac_buckets = sizeof (gmac_bucket_ids) / sizeof (gmac_bucket_ids[0]);
2224	for (j = 0; j < 8; j++) {	/* for each cpu */
2225 cc = board->credit_configs[j];
2226 printf("Credits for Station CPU_%d ---> GMAC buckets (tx path)\n", j);
2227 for (k = 0; k < n_gmac_buckets; k++) {
2228 r = gmac_bucket_ids[k] / 8;
2229 c = gmac_bucket_ids[k] % 8;
2230 printf (" --> gmac%d_bucket_%-3d: credits=%d\n", gmac_id,
2231 gmac_bucket_ids[k], cc->counters[r][c]);
2232 }
2233 }
2234}
2235
2236static void
2237dump_fmn_gmac_credits(struct xlr_board_info *board, int gmac_id)
2238{
2239 struct stn_cc *cc;
2240 int j, k;
2241
2242 cc = board->gmac_block[gmac_id].credit_config;
2243 printf("Credits for Station: GMAC_%d ---> CPU buckets (rx path)\n", gmac_id);
2244	for (j = 0; j < 8; j++) {	/* for each cpu */
2245 printf(" ---> cpu_%d\n", j);
2246		for (k = 0; k < 8; k++) {	/* for each bucket in cpu */
2247 printf(" ---> bucket_%d: credits=%d\n", j * 8 + k,
2248 cc->counters[j][k]);
2249 }
2250 }
2251}
2252
2253static void
2254dump_board_info(struct xlr_board_info *board)
2255{
2256 struct xlr_gmac_block_t *gm;
2257 int i, k;
2258
2259 printf("cpu=%x ", xlr_revision());
2260 printf("board_version: major=%llx, minor=%llx\n",
2261 xlr_boot1_info.board_major_version,
2262 xlr_boot1_info.board_minor_version);
2263	printf("is_xls=%d, nr_cpus=%d, usb=%s, cfi=%s, ata=%s\npci_irq=%d, "
2264	    "gmac_ports=%d\n", board->is_xls, board->nr_cpus,
2265 board->usb ? "Yes" : "No", board->cfi ? "Yes": "No",
2266 board->ata ? "Yes" : "No", board->pci_irq, board->gmacports);
2267 printf("FMN: Core-station bucket sizes\n");
2268 for (i = 0; i < 128; i++) {
2269 if (i && ((i % 16) == 0))
2270 printf("\n");
2271 printf ("b[%d] = %d ", i, board->bucket_sizes->bucket[i]);
2272 }
2273 printf("\n");
2274 for (i = 0; i < 3; i++) {
2275 gm = &board->gmac_block[i];
2276		printf("RNA_%d: type=%d, enabled=%s, mode=%d, station_id=%d, "
2277		    "station_txbase=%d, station_rfr=%d ", i, gm->type,
2278 gm->enabled ? "Yes" : "No", gm->mode, gm->station_id,
2279 gm->station_txbase, gm->station_rfr);
2280 printf("n_ports=%d, baseaddr=%p, baseirq=%d, baseinst=%d\n",
2281 gm->num_ports, (xlr_reg_t *)gm->baseaddr, gm->baseirq,
2282 gm->baseinst);
2283 }
2284	for (k = 0; k < 3; k++) {	/* for each NA */
2285 dump_fmn_cpu_credits_for_gmac(board, k);
2286 dump_fmn_gmac_credits(board, k);
2287 }
2288}
2289
2290static void
2291dump_mac_stats(struct nlge_softc *sc)
2292{
2293 xlr_reg_t *addr;
2294 uint32_t pkts_tx, pkts_rx;
2295
2296 addr = sc->base;
2297 pkts_rx = NLGE_READ(sc->base, R_RPKT);
2298 pkts_tx = NLGE_READ(sc->base, R_TPKT);
2299
2300 printf("[nlge_%d mac stats]: pkts_tx=%u, pkts_rx=%u\n", sc->id, pkts_tx,
2301 pkts_rx);
2302 if (pkts_rx > 0) {
2303 uint32_t r;
2304
2305 /* dump all rx counters. we need this because pkts_rx includes
2306 bad packets. */
2307 for (r = R_RFCS; r <= R_ROVR; r++)
2308 printf("[nlge_%d mac stats]: [0x%x]=%u\n", sc->id, r,
2309 NLGE_READ(sc->base, r));
2310 }
2311 if (pkts_tx > 0) {
2312 uint32_t r;
2313
2314 /* dump all tx counters. might be useful for debugging. */
2315 for (r = R_TMCA; r <= R_TFRG; r++) {
2316 if ((r == (R_TNCL + 1)) || (r == (R_TNCL + 2)))
2317 continue;
2318 printf("[nlge_%d mac stats]: [0x%x]=%u\n", sc->id, r,
2319 NLGE_READ(sc->base, r));
2320 }
2321 }
2322
2323}
2324
2325static void
2326dump_mii_regs(struct nlge_softc *sc)
2327{
2328 uint32_t mii_regs[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
2329 0x8, 0x9, 0xa, 0xf, 0x10, 0x11, 0x12, 0x13,
2330 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
2331 0x1c, 0x1d, 0x1e};
2332 int i, n_regs;
2333
2334 if (sc->mii_base == NULL || sc->mii_bus == NULL)
2335 return;
2336
2337 n_regs = sizeof (mii_regs) / sizeof (mii_regs[0]);
2338 for (i = 0; i < n_regs; i++) {
2339 printf("[mii_0x%x] = %x\n", mii_regs[i],
2340 nlge_mii_read_internal(sc->mii_base, sc->phy_addr,
2341 mii_regs[i]));
2342 }
2343}
2344
2345static void
2346dump_ifmedia(struct ifmedia *ifm)
2347{
2348 printf("ifm_mask=%08x, ifm_media=%08x, cur=%p\n", ifm->ifm_mask,
2349 ifm->ifm_media, ifm->ifm_cur);
2350 if (ifm->ifm_cur != NULL) {
2351 printf("Cur attribs: ifmedia_entry.ifm_media=%08x,"
2352 " ifmedia_entry.ifm_data=%08x\n", ifm->ifm_cur->ifm_media,
2353 ifm->ifm_cur->ifm_data);
2354 }
2355}
2356
2357static void
2358dump_mii_data(struct mii_data *mii)
2359{
2360 dump_ifmedia(&mii->mii_media);
2361 printf("ifp=%p, mii_instance=%d, mii_media_status=%08x,"
2362 " mii_media_active=%08x\n", mii->mii_ifp, mii->mii_instance,
2363 mii->mii_media_status, mii->mii_media_active);
2364}
2365
2366static void
2367dump_pcs_regs(struct nlge_softc *sc, int phy)
2368{
2369 int i, val;
2370
2371 printf("PCS regs from %p for phy=%d\n", sc->pcs_addr, phy);
2372 for (i = 0; i < 18; i++) {
2373 if (i == 2 || i == 3 || (i >= 9 && i <= 14))
2374 continue;
2375 val = nlge_mii_read_internal(sc->pcs_addr, phy, i);
2376 printf("PHY:%d pcs[0x%x] is 0x%x\n", phy, i, val);
2377 }
2378}
2379#endif