Deleted Added
full compact
rge.c (212758) rge.c (213377)
1/*-
2 * Copyright (c) 2003-2009 RMI Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * RMI_BSD
30 */
31
32#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2003-2009 RMI Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * RMI_BSD
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/mips/rmi/dev/xlr/rge.c 212758 2010-09-16 19:13:55Z jchandra $");
33__FBSDID("$FreeBSD: head/sys/mips/rmi/dev/xlr/rge.c 213377 2010-10-03 04:33:58Z jchandra $");
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_device_polling.h"
37#endif
38
39#include <sys/types.h>
40#include <sys/endian.h>
41#include <sys/systm.h>
42#include <sys/sockio.h>
43#include <sys/param.h>
44#include <sys/lock.h>
45#include <sys/mutex.h>
46#include <sys/proc.h>
47#include <sys/limits.h>
48#include <sys/bus.h>
49#include <sys/mbuf.h>
50#include <sys/malloc.h>
51#include <sys/kernel.h>
52#include <sys/module.h>
53#include <sys/socket.h>
54#define __RMAN_RESOURCE_VISIBLE
55#include <sys/rman.h>
56#include <sys/taskqueue.h>
57#include <sys/smp.h>
58#include <sys/sysctl.h>
59
60#include <net/if.h>
61#include <net/if_arp.h>
62#include <net/ethernet.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65
66#include <net/bpf.h>
67#include <net/if_types.h>
68#include <net/if_vlan_var.h>
69
70#include <netinet/in_systm.h>
71#include <netinet/in.h>
72#include <netinet/ip.h>
73
74#include <vm/vm.h>
75#include <vm/pmap.h>
76
77#include <machine/reg.h>
78#include <machine/cpu.h>
79#include <machine/mips_opcode.h>
80#include <machine/asm.h>
81#include <mips/rmi/rmi_mips_exts.h>
82#include <machine/cpuregs.h>
83
84#include <machine/param.h>
85#include <machine/intr_machdep.h>
86#include <machine/clock.h> /* for DELAY */
87#include <machine/cpuregs.h>
88#include <machine/bus.h> /* */
89#include <machine/resource.h>
90
91#include <dev/mii/mii.h>
92#include <dev/mii/miivar.h>
93#include <dev/mii/brgphyreg.h>
94
95#include <mips/rmi/interrupt.h>
96#include <mips/rmi/msgring.h>
97#include <mips/rmi/iomap.h>
98#include <mips/rmi/pic.h>
99#include <mips/rmi/rmi_mips_exts.h>
100#include <mips/rmi/rmi_boot_info.h>
101#include <mips/rmi/board.h>
102
103#include <mips/rmi/dev/xlr/debug.h>
104#include <mips/rmi/dev/xlr/atx_cpld.h>
105#include <mips/rmi/dev/xlr/xgmac_mdio.h>
106#include <mips/rmi/dev/xlr/rge.h>
107
108#include "miibus_if.h"
109
110MODULE_DEPEND(rge, ether, 1, 1, 1);
111MODULE_DEPEND(rge, miibus, 1, 1, 1);
112
113/* #define DEBUG */
114
115#define RGE_TX_THRESHOLD 1024
116#define RGE_TX_Q_SIZE 1024
117
118#ifdef DEBUG
119#undef dbg_msg
120int mac_debug = 1;
121
122#define dbg_msg(fmt, args...) \
123 do {\
124 if (mac_debug) {\
125 printf("[%s@%d|%s]: cpu_%d: " fmt, \
126 __FILE__, __LINE__, __FUNCTION__, xlr_cpu_id(), ##args);\
127 }\
128 } while(0);
129
130#define DUMP_PACKETS
131#else
132#undef dbg_msg
133#define dbg_msg(fmt, args...)
134int mac_debug = 0;
135
136#endif
137
138#define MAC_B2B_IPG 88
139
140/* frame sizes need to be cacheline aligned */
141#define MAX_FRAME_SIZE 1536
142#define MAX_FRAME_SIZE_JUMBO 9216
143
144#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES
145#define MAC_PREPAD 0
146#define BYTE_OFFSET 2
147#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE+BYTE_OFFSET+MAC_PREPAD+MAC_SKB_BACK_PTR_SIZE+SMP_CACHE_BYTES)
148#define MAC_CRC_LEN 4
149#define MAX_NUM_MSGRNG_STN_CC 128
150
151#define MAX_NUM_DESC 1024
152#define MAX_SPILL_SIZE (MAX_NUM_DESC + 128)
153
154#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16
155
156#define MAX_FRIN_SPILL (MAX_SPILL_SIZE << 2)
157#define MAX_FROUT_SPILL (MAX_SPILL_SIZE << 2)
158#define MAX_CLASS_0_SPILL (MAX_SPILL_SIZE << 2)
159#define MAX_CLASS_1_SPILL (MAX_SPILL_SIZE << 2)
160#define MAX_CLASS_2_SPILL (MAX_SPILL_SIZE << 2)
161#define MAX_CLASS_3_SPILL (MAX_SPILL_SIZE << 2)
162
163/*****************************************************************
164 * Phoenix Generic Mac driver
165 *****************************************************************/
166
167extern uint32_t cpu_ltop_map[32];
168
169#ifdef ENABLED_DEBUG
170static int port_counters[4][8] __aligned(XLR_CACHELINE_SIZE);
171
172#define port_inc_counter(port, counter) atomic_add_int(&port_counters[port][(counter)], 1)
173#define port_set_counter(port, counter, value) atomic_set_int(&port_counters[port][(counter)], (value))
174#else
175#define port_inc_counter(port, counter) /* Nothing */
176#define port_set_counter(port, counter, value) /* Nothing */
177#endif
178
179int xlr_rge_tx_prepend[MAXCPU];
180int xlr_rge_tx_done[MAXCPU];
181int xlr_rge_get_p2d_failed[MAXCPU];
182int xlr_rge_msg_snd_failed[MAXCPU];
183int xlr_rge_tx_ok_done[MAXCPU];
184int xlr_rge_rx_done[MAXCPU];
185int xlr_rge_repl_done[MAXCPU];
186
187/* #define mac_stats_add(x, val) ({(x) += (val);}) */
188#define mac_stats_add(x, val) xlr_ldaddwu(val, &x)
189
190#define XLR_MAX_CORE 8
191#define RGE_LOCK_INIT(_sc, _name) \
192 mtx_init(&(_sc)->rge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
193#define RGE_LOCK(_sc) mtx_lock(&(_sc)->rge_mtx)
194#define RGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rge_mtx, MA_OWNED)
195#define RGE_UNLOCK(_sc) mtx_unlock(&(_sc)->rge_mtx)
196#define RGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rge_mtx)
197
198#define XLR_MAX_MACS 8
199#define XLR_MAX_TX_FRAGS 14
200#define MAX_P2D_DESC_PER_PORT 512
201struct p2d_tx_desc {
202 uint64_t frag[XLR_MAX_TX_FRAGS + 2];
203};
204
205#define MAX_TX_RING_SIZE (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT * sizeof(struct p2d_tx_desc))
206
207struct rge_softc *dev_mac[XLR_MAX_MACS];
208static int dev_mac_xgs0;
209static int dev_mac_gmac0;
210
211static int gmac_common_init_done;
212
213
214static int rge_probe(device_t);
215static int rge_attach(device_t);
216static int rge_detach(device_t);
217static int rge_suspend(device_t);
218static int rge_resume(device_t);
219static void rge_release_resources(struct rge_softc *);
220static void rge_rx(struct rge_softc *, vm_paddr_t paddr, int);
221static void rge_intr(void *);
222static void rge_start_locked(struct ifnet *, int);
223static void rge_start(struct ifnet *);
224static int rge_ioctl(struct ifnet *, u_long, caddr_t);
225static void rge_init(void *);
226static void rge_stop(struct rge_softc *);
227static int rge_shutdown(device_t);
228static void rge_reset(struct rge_softc *);
229
230static struct mbuf *get_mbuf(void);
231static void free_buf(vm_paddr_t paddr);
232static void *get_buf(void);
233
234static void xlr_mac_get_hwaddr(struct rge_softc *);
235static void xlr_mac_setup_hwaddr(struct driver_data *);
236static void rmi_xlr_mac_set_enable(struct driver_data *priv, int flag);
237static void rmi_xlr_xgmac_init(struct driver_data *priv);
238static void rmi_xlr_gmac_init(struct driver_data *priv);
239static void mac_common_init(void);
240static int rge_mii_write(device_t, int, int, int);
241static int rge_mii_read(device_t, int, int);
242static void rmi_xlr_mac_mii_statchg(device_t);
243static int rmi_xlr_mac_mediachange(struct ifnet *);
244static void rmi_xlr_mac_mediastatus(struct ifnet *, struct ifmediareq *);
245static void xlr_mac_set_rx_mode(struct rge_softc *sc);
246void
247rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
248 int stid, struct msgrng_msg *msg,
249 void *data);
250static void mac_frin_replenish(void *);
251static int rmi_xlr_mac_open(struct rge_softc *);
252static int rmi_xlr_mac_close(struct rge_softc *);
253static int
254mac_xmit(struct mbuf *, struct rge_softc *,
255 struct driver_data *, int, struct p2d_tx_desc *);
256static int rmi_xlr_mac_xmit(struct mbuf *, struct rge_softc *, int, struct p2d_tx_desc *);
257static struct rge_softc_stats *rmi_xlr_mac_get_stats(struct rge_softc *sc);
258static void rmi_xlr_mac_set_multicast_list(struct rge_softc *sc);
259static int rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu);
260static int rmi_xlr_mac_fill_rxfr(struct rge_softc *sc);
261static void rmi_xlr_config_spill_area(struct driver_data *priv);
262static int rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed);
263static int
264rmi_xlr_mac_set_duplex(struct driver_data *s,
265 xlr_mac_duplex_t duplex, xlr_mac_fc_t fc);
266static void serdes_regs_init(struct driver_data *priv);
267static int rmi_xlr_gmac_reset(struct driver_data *priv);
268
269/*Statistics...*/
270static int get_p2d_desc_failed = 0;
271static int msg_snd_failed = 0;
272
273SYSCTL_INT(_hw, OID_AUTO, get_p2d_failed, CTLFLAG_RW,
274 &get_p2d_desc_failed, 0, "p2d desc failed");
275SYSCTL_INT(_hw, OID_AUTO, msg_snd_failed, CTLFLAG_RW,
276 &msg_snd_failed, 0, "msg snd failed");
277
278struct callout xlr_tx_stop_bkp;
279
280static device_method_t rge_methods[] = {
281 /* Device interface */
282 DEVMETHOD(device_probe, rge_probe),
283 DEVMETHOD(device_attach, rge_attach),
284 DEVMETHOD(device_detach, rge_detach),
285 DEVMETHOD(device_shutdown, rge_shutdown),
286 DEVMETHOD(device_suspend, rge_suspend),
287 DEVMETHOD(device_resume, rge_resume),
288
289 /* MII interface */
290 DEVMETHOD(miibus_readreg, rge_mii_read),
291 DEVMETHOD(miibus_statchg, rmi_xlr_mac_mii_statchg),
292 DEVMETHOD(miibus_writereg, rge_mii_write),
293 {0, 0}
294};
295
296static driver_t rge_driver = {
297 "rge",
298 rge_methods,
299 sizeof(struct rge_softc)
300};
301
302static devclass_t rge_devclass;
303
304DRIVER_MODULE(rge, iodi, rge_driver, rge_devclass, 0, 0);
305DRIVER_MODULE(miibus, rge, miibus_driver, miibus_devclass, 0, 0);
306
307#ifndef __STR
308#define __STR(x) #x
309#endif
310#ifndef STR
311#define STR(x) __STR(x)
312#endif
313
314void *xlr_tx_ring_mem;
315
316struct tx_desc_node {
317 struct p2d_tx_desc *ptr;
318 TAILQ_ENTRY(tx_desc_node) list;
319};
320
321#define XLR_MAX_TX_DESC_NODES (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT)
322struct tx_desc_node tx_desc_nodes[XLR_MAX_TX_DESC_NODES];
323static volatile int xlr_tot_avail_p2d[XLR_MAX_CORE];
324static int xlr_total_active_core = 0;
325
326/*
327 * This should contain the list of all free tx frag desc nodes pointing to tx
328 * p2d arrays
329 */
330static
331TAILQ_HEAD(, tx_desc_node) tx_frag_desc[XLR_MAX_CORE] =
332{
333 TAILQ_HEAD_INITIALIZER(tx_frag_desc[0]),
334 TAILQ_HEAD_INITIALIZER(tx_frag_desc[1]),
335 TAILQ_HEAD_INITIALIZER(tx_frag_desc[2]),
336 TAILQ_HEAD_INITIALIZER(tx_frag_desc[3]),
337 TAILQ_HEAD_INITIALIZER(tx_frag_desc[4]),
338 TAILQ_HEAD_INITIALIZER(tx_frag_desc[5]),
339 TAILQ_HEAD_INITIALIZER(tx_frag_desc[6]),
340 TAILQ_HEAD_INITIALIZER(tx_frag_desc[7]),
341};
342
343/* This contains a list of free tx frag node descriptors */
344static
345TAILQ_HEAD(, tx_desc_node) free_tx_frag_desc[XLR_MAX_CORE] =
346{
347 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[0]),
348 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[1]),
349 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[2]),
350 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[3]),
351 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[4]),
352 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[5]),
353 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[6]),
354 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[7]),
355};
356
357static struct mtx tx_desc_lock[XLR_MAX_CORE];
358
359static inline void
360mac_make_desc_rfr(struct msgrng_msg *msg,
361 vm_paddr_t addr)
362{
363 msg->msg0 = (uint64_t) addr & 0xffffffffe0ULL;
364 msg->msg1 = msg->msg2 = msg->msg3 = 0;
365}
366
367#define MAC_TX_DESC_ALIGNMENT (XLR_CACHELINE_SIZE - 1)
368
369static void
370init_p2d_allocation(void)
371{
372 int active_core[8] = {0};
373 int i = 0;
374 uint32_t cpumask;
375 int cpu;
376
377 cpumask = xlr_hw_thread_mask;
378
379 for (i = 0; i < 32; i++) {
380 if (cpumask & (1 << i)) {
381 cpu = i;
382 if (!active_core[cpu / 4]) {
383 active_core[cpu / 4] = 1;
384 xlr_total_active_core++;
385 }
386 }
387 }
388 for (i = 0; i < XLR_MAX_CORE; i++) {
389 if (active_core[i])
390 xlr_tot_avail_p2d[i] = XLR_MAX_TX_DESC_NODES / xlr_total_active_core;
391 }
392 printf("Total Active Core %d\n", xlr_total_active_core);
393}
394
395
/*
 * Allocate the shared TX p2d descriptor ring and distribute its
 * descriptors across the per-core free lists (tx_frag_desc[]).
 * Called once at driver initialization; panics if the contiguous
 * allocation fails.
 */
static void
init_tx_ring(void)
{
	int i;
	int j = 0;
	struct tx_desc_node *start, *node;
	struct p2d_tx_desc *tx_desc;
	vm_paddr_t paddr;
	vm_offset_t unmapped_addr;

	/* One spin lock per core protects that core's descriptor lists. */
	for (i = 0; i < XLR_MAX_CORE; i++)
		mtx_init(&tx_desc_lock[i], "xlr tx_desc", NULL, MTX_SPIN);

	start = &tx_desc_nodes[0];
	/* TODO: try to get this from KSEG0 */
	/*
	 * Physically contiguous, cacheline-aligned ring below 256MB so the
	 * KSEG0 direct-mapped window below can reach it.
	 */
	xlr_tx_ring_mem = contigmalloc((MAX_TX_RING_SIZE + XLR_CACHELINE_SIZE),
	    M_DEVBUF, M_NOWAIT | M_ZERO, 0,
	    0x10000000, XLR_CACHELINE_SIZE, 0);

	if (xlr_tx_ring_mem == NULL) {
		panic("TX ring memory allocation failed");
	}
	paddr = vtophys((vm_offset_t)xlr_tx_ring_mem);

	/* Access the ring through the unmapped KSEG0 window. */
	unmapped_addr = MIPS_PHYS_TO_KSEG0(paddr);


	tx_desc = (struct p2d_tx_desc *)unmapped_addr;

	/*
	 * Hand out descriptors to cores round-robin in contiguous chunks:
	 * j advances to the next core's list every
	 * XLR_MAX_TX_DESC_NODES / xlr_total_active_core descriptors.
	 */
	for (i = 0; i < XLR_MAX_TX_DESC_NODES; i++) {
		node = start + i;
		node->ptr = tx_desc;
		tx_desc++;
		TAILQ_INSERT_HEAD(&tx_frag_desc[j], node, list);
		j = (i / (XLR_MAX_TX_DESC_NODES / xlr_total_active_core));
	}
}
433
434static inline struct p2d_tx_desc *
435get_p2d_desc(void)
436{
437 struct tx_desc_node *node;
438 struct p2d_tx_desc *tx_desc = NULL;
439 int cpu = xlr_core_id();
440
441 mtx_lock_spin(&tx_desc_lock[cpu]);
442 node = TAILQ_FIRST(&tx_frag_desc[cpu]);
443 if (node) {
444 xlr_tot_avail_p2d[cpu]--;
445 TAILQ_REMOVE(&tx_frag_desc[cpu], node, list);
446 tx_desc = node->ptr;
447 TAILQ_INSERT_HEAD(&free_tx_frag_desc[cpu], node, list);
448 } else {
449 /* Increment p2d desc fail count */
450 get_p2d_desc_failed++;
451 }
452 mtx_unlock_spin(&tx_desc_lock[cpu]);
453 return tx_desc;
454}
455static void
456free_p2d_desc(struct p2d_tx_desc *tx_desc)
457{
458 struct tx_desc_node *node;
459 int cpu = xlr_core_id();
460
461 mtx_lock_spin(&tx_desc_lock[cpu]);
462 node = TAILQ_FIRST(&free_tx_frag_desc[cpu]);
463 KASSERT((node != NULL), ("Free TX frag node list is empty\n"));
464
465 TAILQ_REMOVE(&free_tx_frag_desc[cpu], node, list);
466 node->ptr = tx_desc;
467 TAILQ_INSERT_HEAD(&tx_frag_desc[cpu], node, list);
468 xlr_tot_avail_p2d[cpu]++;
469 mtx_unlock_spin(&tx_desc_lock[cpu]);
470
471}
472
/*
 * Translate an mbuf chain into the hardware p2d fragment list held in
 * tx_desc, and fill *p2p_msg with the message-ring descriptor pointing
 * at it.  Each fragment entry encodes (stid << 54) | (len << 40) | paddr;
 * an mbuf whose data crosses a page boundary is split into two physically
 * contiguous fragments.  Returns 0 on success, 1 on failure (NULL
 * descriptor or too many fragments; the descriptor is released on the
 * latter).
 */
static int
build_frag_list(struct mbuf *m_head, struct msgrng_msg *p2p_msg, struct p2d_tx_desc *tx_desc)
{
	struct mbuf *m;
	vm_paddr_t paddr;
	uint64_t p2d_len;
	int nfrag;
	vm_paddr_t p1, p2;
	uint32_t len1, len2;
	vm_offset_t taddr;
	uint64_t fr_stid;

	/* Free-back bucket for this hw thread: (core << 3) + thread + 4. */
	fr_stid = (xlr_core_id() << 3) + xlr_thr_id() + 4;

	if (tx_desc == NULL)
		return 1;

	nfrag = 0;
	for (m = m_head; m != NULL; m = m->m_next) {
		/* Reserve room for the trailing free-back fragment. */
		if ((nfrag + 1) >= XLR_MAX_TX_FRAGS) {
			free_p2d_desc(tx_desc);
			return 1;
		}
		if (m->m_len != 0) {
			paddr = vtophys(mtod(m, vm_offset_t));
			p1 = paddr + m->m_len;
			p2 = vtophys(((vm_offset_t)m->m_data + m->m_len));
			if (p1 != p2) {
				/*
				 * Data is not physically contiguous (crosses
				 * a page boundary): emit the first-page part,
				 * then the remainder as a second fragment.
				 */
				len1 = (uint32_t)
				    (PAGE_SIZE - (paddr & PAGE_MASK));
				tx_desc->frag[nfrag] = (127ULL << 54) |
				    ((uint64_t) len1 << 40) | paddr;
				nfrag++;
				taddr = (vm_offset_t)m->m_data + len1;
				p2 = vtophys(taddr);
				len2 = m->m_len - len1;
				if (len2 == 0)
					continue;
				if (nfrag >= XLR_MAX_TX_FRAGS)
					panic("TX frags exceeded");

				tx_desc->frag[nfrag] = (127ULL << 54) |
				    ((uint64_t) len2 << 40) | p2;

				taddr += len2;
				p1 = vtophys(taddr);

				/*
				 * Sanity check: an mbuf spanning more than
				 * two discontiguous pages is unsupported.
				 */
				if ((p2 + len2) != p1) {
					printf("p1 = %p p2 = %p\n", (void *)p1, (void *)p2);
					printf("len1 = %x len2 = %x\n", len1,
					    len2);
					printf("m_data %p\n", m->m_data);
					DELAY(1000000);
					panic("Multiple Mbuf segment discontiguous\n");
				}
			} else {
				tx_desc->frag[nfrag] = (127ULL << 54) |
				    ((uint64_t) m->m_len << 40) | paddr;
			}
			nfrag++;
		}
	}
	/* set eop in the last tx p2d desc */
	tx_desc->frag[nfrag - 1] |= (1ULL << 63);
	paddr = vtophys((vm_offset_t)tx_desc);
	/* Trailing fragment: tells hw to return the descriptor to fr_stid. */
	tx_desc->frag[nfrag] = (1ULL << 63) | (fr_stid << 54) | paddr;
	nfrag++;
	/* Stash self and mbuf pointers for release_tx_desc() validation. */
	tx_desc->frag[XLR_MAX_TX_FRAGS] = (uint64_t)(intptr_t)tx_desc;
	tx_desc->frag[XLR_MAX_TX_FRAGS + 1] = (uint64_t)(intptr_t)m_head;

	p2d_len = (nfrag * 8);
	p2p_msg->msg0 = (1ULL << 63) | (1ULL << 62) | (127ULL << 54) |
	    (p2d_len << 40) | paddr;

	return 0;
}
549static void
550release_tx_desc(struct msgrng_msg *msg, int rel_buf)
551{
552 struct p2d_tx_desc *tx_desc, *chk_addr;
553 struct mbuf *m;
554
555 tx_desc = (struct p2d_tx_desc *)MIPS_PHYS_TO_KSEG0(msg->msg0);
556 chk_addr = (struct p2d_tx_desc *)(intptr_t)tx_desc->frag[XLR_MAX_TX_FRAGS];
557 if (tx_desc != chk_addr) {
558 printf("Address %p does not match with stored addr %p - we leaked a descriptor\n",
559 tx_desc, chk_addr);
560 return;
561 }
562 if (rel_buf) {
563 m = (struct mbuf *)(intptr_t)tx_desc->frag[XLR_MAX_TX_FRAGS + 1];
564 m_freem(m);
565 }
566 free_p2d_desc(tx_desc);
567}
568
569
570static struct mbuf *
571get_mbuf(void)
572{
573 struct mbuf *m_new = NULL;
574
575 if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
576 return NULL;
577
578 m_new->m_len = MCLBYTES;
579 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
580 return m_new;
581}
582
/*
 * Free a receive buffer given its physical address.  get_buf() stored
 * a back pointer to the owning mbuf and a magic word (0xf00bad) in the
 * cacheline immediately preceding the buffer; read both back with
 * direct physical loads and free the mbuf if the magic checks out.
 */
static void
free_buf(vm_paddr_t paddr)
{
	struct mbuf *m;
	uint64_t mag;
	uint32_t sr;

	/* Enable KX so 64-bit physical-address loads are legal here. */
	sr = xlr_enable_kx();
	m = (struct mbuf *)(intptr_t)xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
	mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
	xlr_restore_kx(sr);
	if (mag != 0xf00bad) {
		/* Metadata corrupt: complain and leak rather than free garbage. */
		printf("Something is wrong kseg:%lx found mag:%lx not 0xf00bad\n",
		    (u_long)paddr, (u_long)mag);
		return;
	}
	if (m != NULL)
		m_freem(m);
}
602
/*
 * Allocate a cacheline-aligned receive buffer for the MAC free-in ring.
 * The cacheline immediately before the returned pointer holds a back
 * pointer to the owning mbuf and a magic word (0xf00bad) which
 * free_buf() validates before freeing.  Returns the buffer data
 * pointer, or NULL on allocation failure.
 */
static void *
get_buf(void)
{
	struct mbuf *m_new = NULL;
	uint64_t *md;
#ifdef INVARIANTS
	vm_paddr_t temp1, temp2;
#endif

	m_new = get_mbuf();
	if (m_new == NULL)
		return NULL;

	/* Align m_data to the next cacheline boundary (0x1f mask). */
	m_adj(m_new, XLR_CACHELINE_SIZE - ((uintptr_t)m_new->m_data & 0x1f));
	md = (uint64_t *)m_new->m_data;
	md[0] = (uintptr_t)m_new;	/* Back Ptr */
	md[1] = 0xf00bad;		/* magic checked by free_buf() */
	/* Skip past the metadata cacheline; caller gets the line after it. */
	m_adj(m_new, XLR_CACHELINE_SIZE);

#ifdef INVARIANTS
	/* The hardware requires the buffer to be physically contiguous. */
	temp1 = vtophys((vm_offset_t)m_new->m_data);
	temp2 = vtophys((vm_offset_t)m_new->m_data + 1536);
	if ((temp1 + 1536) != temp2)
		panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
#endif
	return (void *)m_new->m_data;
}
630
631/**********************************************************************
632 **********************************************************************/
633static void
634rmi_xlr_mac_set_enable(struct driver_data *priv, int flag)
635{
636 uint32_t regval;
637 int tx_threshold = 1518;
638
639 if (flag) {
640 regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
641 regval |= (1 << O_TX_CONTROL__TxEnable) |
642 (tx_threshold << O_TX_CONTROL__TxThreshold);
643
644 xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);
645
646 regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
647 regval |= 1 << O_RX_CONTROL__RxEnable;
648 if (priv->mode == XLR_PORT0_RGMII)
649 regval |= 1 << O_RX_CONTROL__RGMII;
650 xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);
651
652 regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
653 regval |= (O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
654 xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
655 } else {
656 regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
657 regval &= ~((1 << O_TX_CONTROL__TxEnable) |
658 (tx_threshold << O_TX_CONTROL__TxThreshold));
659
660 xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);
661
662 regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
663 regval &= ~(1 << O_RX_CONTROL__RxEnable);
664 xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);
665
666 regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
667 regval &= ~(O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
668 xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
669 }
670}
671
672/**********************************************************************
673 **********************************************************************/
/*
 * Hand a free receive buffer back to the MAC's free-in bucket via the
 * message ring.  Retries message_send() until it succeeds (credit
 * exhaustion makes it fail transiently); under INVARIANTS an attempt
 * counter panics after 100000 failures.  Always returns 0.
 */
static __inline__ int
xlr_mac_send_fr(struct driver_data *priv,
    vm_paddr_t addr, int len)
{
	struct msgrng_msg msg;
	int stid = priv->rfrbucket;
	int code, ret;
	uint32_t msgrng_flags;
#ifdef INVARIANTS
	int i = 0;
#endif

	mac_make_desc_rfr(&msg, addr);

	/* Send the packet to MAC */
	dbg_msg("mac_%d: Sending free packet %lx to stid %d\n",
	    priv->instance, (u_long)addr, stid);
	/* XGMAC and GMAC free-in messages use different message codes. */
	if (priv->type == XLR_XGMAC)
		code = MSGRNG_CODE_XGMAC;	/* WHY? */
	else
		code = MSGRNG_CODE_MAC;

	do {
		/* msgring access must be bracketed by enable/restore */
		msgrng_flags = msgrng_access_enable();
		ret = message_send(1, code, stid, &msg);
		msgrng_restore(msgrng_flags);
		/* i++ only exists under INVARIANTS; KASSERT is a no-op otherwise */
		KASSERT(i++ < 100000, ("Too many credit fails\n"));
	} while (ret != 0);

	return 0;
}
705
706/**************************************************************/
707
708static void
709xgmac_mdio_setup(volatile unsigned int *_mmio)
710{
711 int i;
712 uint32_t rd_data;
713
714 for (i = 0; i < 4; i++) {
715 rd_data = xmdio_read(_mmio, 1, 0x8000 + i);
716 rd_data = rd_data & 0xffffdfff; /* clear isolate bit */
717 xmdio_write(_mmio, 1, 0x8000 + i, rd_data);
718 }
719}
720
721/**********************************************************************
722 * Init MII interface
723 *
724 * Input parameters:
725 * s - priv structure
726 ********************************************************************* */
727#define PHY_STATUS_RETRIES 25000
728
729static void
730rmi_xlr_mac_mii_init(struct driver_data *priv)
731{
732 xlr_reg_t *mii_mmio = priv->mii_mmio;
733
734 /* use the lowest clock divisor - divisor 28 */
735 xlr_write_reg(mii_mmio, R_MII_MGMT_CONFIG, 0x07);
736}
737
738/**********************************************************************
739 * Read a PHY register.
740 *
741 * Input parameters:
742 * s - priv structure
743 * phyaddr - PHY's address
744 * regidx = index of register to read
745 *
746 * Return value:
747 * value read, or 0 if an error occurred.
748 ********************************************************************* */
749
750static int
751rge_mii_read_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx)
752{
753 int i = 0;
754
755 /* setup the phy reg to be used */
756 xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
757 (phyaddr << 8) | (regidx << 0));
758 /* Issue the read command */
759 xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND,
760 (1 << O_MII_MGMT_COMMAND__rstat));
761
762 /* poll for the read cycle to complete */
763 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
764 if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
765 break;
766 }
767
768 /* clear the read cycle */
769 xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND, 0);
770
771 if (i == PHY_STATUS_RETRIES) {
772 return 0xffffffff;
773 }
774 /* Read the data back */
775 return xlr_read_reg(mii_mmio, R_MII_MGMT_STATUS);
776}
777
778static int
779rge_mii_read(device_t dev, int phyaddr, int regidx)
780{
781 struct rge_softc *sc = device_get_softc(dev);
782
783 return rge_mii_read_internal(sc->priv.mii_mmio, phyaddr, regidx);
784}
785
786/**********************************************************************
787 * Set MII hooks to newly selected media
788 *
789 * Input parameters:
790 * ifp - Interface Pointer
791 *
792 * Return value:
793 * nothing
794 ********************************************************************* */
795static int
796rmi_xlr_mac_mediachange(struct ifnet *ifp)
797{
798 struct rge_softc *sc = ifp->if_softc;
799
800 if (ifp->if_flags & IFF_UP)
801 mii_mediachg(&sc->rge_mii);
802
803 return 0;
804}
805
806/**********************************************************************
807 * Get the current interface media status
808 *
809 * Input parameters:
810 * ifp - Interface Pointer
811 * ifmr - Interface media request ptr
812 *
813 * Return value:
814 * nothing
815 ********************************************************************* */
816static void
817rmi_xlr_mac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
818{
819 struct rge_softc *sc = ifp->if_softc;
820
821 /* Check whether this is interface is active or not. */
822 ifmr->ifm_status = IFM_AVALID;
823 if (sc->link_up) {
824 ifmr->ifm_status |= IFM_ACTIVE;
825 } else {
826 ifmr->ifm_active = IFM_ETHER;
827 }
828}
829
830/**********************************************************************
831 * Write a value to a PHY register.
832 *
833 * Input parameters:
834 * s - priv structure
835 * phyaddr - PHY to use
836 * regidx - register within the PHY
837 * regval - data to write to register
838 *
839 * Return value:
840 * nothing
841 ********************************************************************* */
842static void
843rge_mii_write_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx, int regval)
844{
845 int i = 0;
846
847 xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
848 (phyaddr << 8) | (regidx << 0));
849
850 /* Write the data which starts the write cycle */
851 xlr_write_reg(mii_mmio, R_MII_MGMT_WRITE_DATA, regval);
852
853 /* poll for the write cycle to complete */
854 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
855 if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
856 break;
857 }
858
859 return;
860}
861
862static int
863rge_mii_write(device_t dev, int phyaddr, int regidx, int regval)
864{
865 struct rge_softc *sc = device_get_softc(dev);
866
867 rge_mii_write_internal(sc->priv.mii_mmio, phyaddr, regidx, regval);
868 return (0);
869}
870
/*
 * miibus media-status-change callback.  Intentionally empty:
 * presumably link-state handling happens elsewhere in this driver —
 * TODO(review): confirm against the interrupt/open paths.
 */
static void
rmi_xlr_mac_mii_statchg(struct device *dev)
{
}
875
/*
 * One-time SERDES bring-up: program the SERDES control registers
 * (device address 26) with board-specific magic values, then poke the
 * GPIO block to release/clock the SERDES.  The write order and delays
 * are part of the hardware bring-up sequence — do not reorder.
 */
static void
serdes_regs_init(struct driver_data *priv)
{
	xlr_reg_t *mmio_gpio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GPIO_OFFSET);

	/* Initialize SERDES CONTROL Registers */
	rge_mii_write_internal(priv->serdes_mmio, 26, 0, 0x6DB0);
	rge_mii_write_internal(priv->serdes_mmio, 26, 1, 0xFFFF);
	rge_mii_write_internal(priv->serdes_mmio, 26, 2, 0xB6D0);
	rge_mii_write_internal(priv->serdes_mmio, 26, 3, 0x00FF);
	rge_mii_write_internal(priv->serdes_mmio, 26, 4, 0x0000);
	rge_mii_write_internal(priv->serdes_mmio, 26, 5, 0x0000);
	rge_mii_write_internal(priv->serdes_mmio, 26, 6, 0x0005);
	rge_mii_write_internal(priv->serdes_mmio, 26, 7, 0x0001);
	rge_mii_write_internal(priv->serdes_mmio, 26, 8, 0x0000);
	rge_mii_write_internal(priv->serdes_mmio, 26, 9, 0x0000);
	rge_mii_write_internal(priv->serdes_mmio, 26, 10, 0x0000);

	/*
	 * GPIO setting which affect the serdes - needs figuring out
	 */
	DELAY(100);
	xlr_write_reg(mmio_gpio, 0x20, 0x7e6802);
	xlr_write_reg(mmio_gpio, 0x10, 0x7104);
	DELAY(100);

	/*
	 * This kludge is needed to setup serdes (?) clock correctly on some
	 * XLS boards
	 */
	if ((xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI ||
	    xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XII) &&
	    xlr_boot1_info.board_minor_version == 4) {
		/* use 125 Mhz instead of 156.25Mhz ref clock */
		DELAY(100);
		xlr_write_reg(mmio_gpio, 0x10, 0x7103);
		xlr_write_reg(mmio_gpio, 0x21, 0x7103);
		DELAY(100);
	}

	return;
}
918
919static void
920serdes_autoconfig(struct driver_data *priv)
921{
922 int delay = 100000;
923
924 /* Enable Auto negotiation in the PCS Layer */
925 rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x1000);
926 DELAY(delay);
927 rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x0200);
928 DELAY(delay);
929
930 rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x1000);
931 DELAY(delay);
932 rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x0200);
933 DELAY(delay);
934
935 rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x1000);
936 DELAY(delay);
937 rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x0200);
938 DELAY(delay);
939
940 rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x1000);
941 DELAY(delay);
942 rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x0200);
943 DELAY(delay);
944
945}
946
947/*****************************************************************
948 * Initialize GMAC
949 *****************************************************************/
950static void
951rmi_xlr_config_pde(struct driver_data *priv)
952{
953 int i = 0, cpu = 0, bucket = 0;
954 uint64_t bucket_map = 0;
955
956 /* uint32_t desc_pack_ctrl = 0; */
957 uint32_t cpumask;
958
959 cpumask = 0x1;
960#ifdef SMP
961 /*
962 * rge may be called before SMP start in a BOOTP/NFSROOT
963 * setup. we will distribute packets to other cpus only when
964 * the SMP is started.
965 */
966 if (smp_started)
967 cpumask = xlr_hw_thread_mask;
968#endif
969
970 for (i = 0; i < MAXCPU; i++) {
971 if (cpumask & (1 << i)) {
972 cpu = i;
973 bucket = ((cpu >> 2) << 3);
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_device_polling.h"
37#endif
38
39#include <sys/types.h>
40#include <sys/endian.h>
41#include <sys/systm.h>
42#include <sys/sockio.h>
43#include <sys/param.h>
44#include <sys/lock.h>
45#include <sys/mutex.h>
46#include <sys/proc.h>
47#include <sys/limits.h>
48#include <sys/bus.h>
49#include <sys/mbuf.h>
50#include <sys/malloc.h>
51#include <sys/kernel.h>
52#include <sys/module.h>
53#include <sys/socket.h>
54#define __RMAN_RESOURCE_VISIBLE
55#include <sys/rman.h>
56#include <sys/taskqueue.h>
57#include <sys/smp.h>
58#include <sys/sysctl.h>
59
60#include <net/if.h>
61#include <net/if_arp.h>
62#include <net/ethernet.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65
66#include <net/bpf.h>
67#include <net/if_types.h>
68#include <net/if_vlan_var.h>
69
70#include <netinet/in_systm.h>
71#include <netinet/in.h>
72#include <netinet/ip.h>
73
74#include <vm/vm.h>
75#include <vm/pmap.h>
76
77#include <machine/reg.h>
78#include <machine/cpu.h>
79#include <machine/mips_opcode.h>
80#include <machine/asm.h>
81#include <mips/rmi/rmi_mips_exts.h>
82#include <machine/cpuregs.h>
83
84#include <machine/param.h>
85#include <machine/intr_machdep.h>
86#include <machine/clock.h> /* for DELAY */
87#include <machine/cpuregs.h>
88#include <machine/bus.h> /* */
89#include <machine/resource.h>
90
91#include <dev/mii/mii.h>
92#include <dev/mii/miivar.h>
93#include <dev/mii/brgphyreg.h>
94
95#include <mips/rmi/interrupt.h>
96#include <mips/rmi/msgring.h>
97#include <mips/rmi/iomap.h>
98#include <mips/rmi/pic.h>
99#include <mips/rmi/rmi_mips_exts.h>
100#include <mips/rmi/rmi_boot_info.h>
101#include <mips/rmi/board.h>
102
103#include <mips/rmi/dev/xlr/debug.h>
104#include <mips/rmi/dev/xlr/atx_cpld.h>
105#include <mips/rmi/dev/xlr/xgmac_mdio.h>
106#include <mips/rmi/dev/xlr/rge.h>
107
108#include "miibus_if.h"
109
110MODULE_DEPEND(rge, ether, 1, 1, 1);
111MODULE_DEPEND(rge, miibus, 1, 1, 1);
112
113/* #define DEBUG */
114
115#define RGE_TX_THRESHOLD 1024
116#define RGE_TX_Q_SIZE 1024
117
118#ifdef DEBUG
119#undef dbg_msg
120int mac_debug = 1;
121
122#define dbg_msg(fmt, args...) \
123 do {\
124 if (mac_debug) {\
125 printf("[%s@%d|%s]: cpu_%d: " fmt, \
126 __FILE__, __LINE__, __FUNCTION__, xlr_cpu_id(), ##args);\
127 }\
128 } while(0);
129
130#define DUMP_PACKETS
131#else
132#undef dbg_msg
133#define dbg_msg(fmt, args...)
134int mac_debug = 0;
135
136#endif
137
138#define MAC_B2B_IPG 88
139
140/* frame sizes need to be cacheline aligned */
141#define MAX_FRAME_SIZE 1536
142#define MAX_FRAME_SIZE_JUMBO 9216
143
144#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES
145#define MAC_PREPAD 0
146#define BYTE_OFFSET 2
147#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE+BYTE_OFFSET+MAC_PREPAD+MAC_SKB_BACK_PTR_SIZE+SMP_CACHE_BYTES)
148#define MAC_CRC_LEN 4
149#define MAX_NUM_MSGRNG_STN_CC 128
150
151#define MAX_NUM_DESC 1024
152#define MAX_SPILL_SIZE (MAX_NUM_DESC + 128)
153
154#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16
155
156#define MAX_FRIN_SPILL (MAX_SPILL_SIZE << 2)
157#define MAX_FROUT_SPILL (MAX_SPILL_SIZE << 2)
158#define MAX_CLASS_0_SPILL (MAX_SPILL_SIZE << 2)
159#define MAX_CLASS_1_SPILL (MAX_SPILL_SIZE << 2)
160#define MAX_CLASS_2_SPILL (MAX_SPILL_SIZE << 2)
161#define MAX_CLASS_3_SPILL (MAX_SPILL_SIZE << 2)
162
163/*****************************************************************
164 * Phoenix Generic Mac driver
165 *****************************************************************/
166
167extern uint32_t cpu_ltop_map[32];
168
169#ifdef ENABLED_DEBUG
170static int port_counters[4][8] __aligned(XLR_CACHELINE_SIZE);
171
172#define port_inc_counter(port, counter) atomic_add_int(&port_counters[port][(counter)], 1)
173#define port_set_counter(port, counter, value) atomic_set_int(&port_counters[port][(counter)], (value))
174#else
175#define port_inc_counter(port, counter) /* Nothing */
176#define port_set_counter(port, counter, value) /* Nothing */
177#endif
178
179int xlr_rge_tx_prepend[MAXCPU];
180int xlr_rge_tx_done[MAXCPU];
181int xlr_rge_get_p2d_failed[MAXCPU];
182int xlr_rge_msg_snd_failed[MAXCPU];
183int xlr_rge_tx_ok_done[MAXCPU];
184int xlr_rge_rx_done[MAXCPU];
185int xlr_rge_repl_done[MAXCPU];
186
187/* #define mac_stats_add(x, val) ({(x) += (val);}) */
188#define mac_stats_add(x, val) xlr_ldaddwu(val, &x)
189
190#define XLR_MAX_CORE 8
191#define RGE_LOCK_INIT(_sc, _name) \
192 mtx_init(&(_sc)->rge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
193#define RGE_LOCK(_sc) mtx_lock(&(_sc)->rge_mtx)
194#define RGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rge_mtx, MA_OWNED)
195#define RGE_UNLOCK(_sc) mtx_unlock(&(_sc)->rge_mtx)
196#define RGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rge_mtx)
197
198#define XLR_MAX_MACS 8
199#define XLR_MAX_TX_FRAGS 14
200#define MAX_P2D_DESC_PER_PORT 512
201struct p2d_tx_desc {
202 uint64_t frag[XLR_MAX_TX_FRAGS + 2];
203};
204
205#define MAX_TX_RING_SIZE (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT * sizeof(struct p2d_tx_desc))
206
207struct rge_softc *dev_mac[XLR_MAX_MACS];
208static int dev_mac_xgs0;
209static int dev_mac_gmac0;
210
211static int gmac_common_init_done;
212
213
214static int rge_probe(device_t);
215static int rge_attach(device_t);
216static int rge_detach(device_t);
217static int rge_suspend(device_t);
218static int rge_resume(device_t);
219static void rge_release_resources(struct rge_softc *);
220static void rge_rx(struct rge_softc *, vm_paddr_t paddr, int);
221static void rge_intr(void *);
222static void rge_start_locked(struct ifnet *, int);
223static void rge_start(struct ifnet *);
224static int rge_ioctl(struct ifnet *, u_long, caddr_t);
225static void rge_init(void *);
226static void rge_stop(struct rge_softc *);
227static int rge_shutdown(device_t);
228static void rge_reset(struct rge_softc *);
229
230static struct mbuf *get_mbuf(void);
231static void free_buf(vm_paddr_t paddr);
232static void *get_buf(void);
233
234static void xlr_mac_get_hwaddr(struct rge_softc *);
235static void xlr_mac_setup_hwaddr(struct driver_data *);
236static void rmi_xlr_mac_set_enable(struct driver_data *priv, int flag);
237static void rmi_xlr_xgmac_init(struct driver_data *priv);
238static void rmi_xlr_gmac_init(struct driver_data *priv);
239static void mac_common_init(void);
240static int rge_mii_write(device_t, int, int, int);
241static int rge_mii_read(device_t, int, int);
242static void rmi_xlr_mac_mii_statchg(device_t);
243static int rmi_xlr_mac_mediachange(struct ifnet *);
244static void rmi_xlr_mac_mediastatus(struct ifnet *, struct ifmediareq *);
245static void xlr_mac_set_rx_mode(struct rge_softc *sc);
246void
247rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
248 int stid, struct msgrng_msg *msg,
249 void *data);
250static void mac_frin_replenish(void *);
251static int rmi_xlr_mac_open(struct rge_softc *);
252static int rmi_xlr_mac_close(struct rge_softc *);
253static int
254mac_xmit(struct mbuf *, struct rge_softc *,
255 struct driver_data *, int, struct p2d_tx_desc *);
256static int rmi_xlr_mac_xmit(struct mbuf *, struct rge_softc *, int, struct p2d_tx_desc *);
257static struct rge_softc_stats *rmi_xlr_mac_get_stats(struct rge_softc *sc);
258static void rmi_xlr_mac_set_multicast_list(struct rge_softc *sc);
259static int rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu);
260static int rmi_xlr_mac_fill_rxfr(struct rge_softc *sc);
261static void rmi_xlr_config_spill_area(struct driver_data *priv);
262static int rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed);
263static int
264rmi_xlr_mac_set_duplex(struct driver_data *s,
265 xlr_mac_duplex_t duplex, xlr_mac_fc_t fc);
266static void serdes_regs_init(struct driver_data *priv);
267static int rmi_xlr_gmac_reset(struct driver_data *priv);
268
269/*Statistics...*/
270static int get_p2d_desc_failed = 0;
271static int msg_snd_failed = 0;
272
273SYSCTL_INT(_hw, OID_AUTO, get_p2d_failed, CTLFLAG_RW,
274 &get_p2d_desc_failed, 0, "p2d desc failed");
275SYSCTL_INT(_hw, OID_AUTO, msg_snd_failed, CTLFLAG_RW,
276 &msg_snd_failed, 0, "msg snd failed");
277
278struct callout xlr_tx_stop_bkp;
279
280static device_method_t rge_methods[] = {
281 /* Device interface */
282 DEVMETHOD(device_probe, rge_probe),
283 DEVMETHOD(device_attach, rge_attach),
284 DEVMETHOD(device_detach, rge_detach),
285 DEVMETHOD(device_shutdown, rge_shutdown),
286 DEVMETHOD(device_suspend, rge_suspend),
287 DEVMETHOD(device_resume, rge_resume),
288
289 /* MII interface */
290 DEVMETHOD(miibus_readreg, rge_mii_read),
291 DEVMETHOD(miibus_statchg, rmi_xlr_mac_mii_statchg),
292 DEVMETHOD(miibus_writereg, rge_mii_write),
293 {0, 0}
294};
295
296static driver_t rge_driver = {
297 "rge",
298 rge_methods,
299 sizeof(struct rge_softc)
300};
301
302static devclass_t rge_devclass;
303
304DRIVER_MODULE(rge, iodi, rge_driver, rge_devclass, 0, 0);
305DRIVER_MODULE(miibus, rge, miibus_driver, miibus_devclass, 0, 0);
306
307#ifndef __STR
308#define __STR(x) #x
309#endif
310#ifndef STR
311#define STR(x) __STR(x)
312#endif
313
314void *xlr_tx_ring_mem;
315
316struct tx_desc_node {
317 struct p2d_tx_desc *ptr;
318 TAILQ_ENTRY(tx_desc_node) list;
319};
320
321#define XLR_MAX_TX_DESC_NODES (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT)
322struct tx_desc_node tx_desc_nodes[XLR_MAX_TX_DESC_NODES];
323static volatile int xlr_tot_avail_p2d[XLR_MAX_CORE];
324static int xlr_total_active_core = 0;
325
326/*
327 * This should contain the list of all free tx frag desc nodes pointing to tx
328 * p2d arrays
329 */
330static
331TAILQ_HEAD(, tx_desc_node) tx_frag_desc[XLR_MAX_CORE] =
332{
333 TAILQ_HEAD_INITIALIZER(tx_frag_desc[0]),
334 TAILQ_HEAD_INITIALIZER(tx_frag_desc[1]),
335 TAILQ_HEAD_INITIALIZER(tx_frag_desc[2]),
336 TAILQ_HEAD_INITIALIZER(tx_frag_desc[3]),
337 TAILQ_HEAD_INITIALIZER(tx_frag_desc[4]),
338 TAILQ_HEAD_INITIALIZER(tx_frag_desc[5]),
339 TAILQ_HEAD_INITIALIZER(tx_frag_desc[6]),
340 TAILQ_HEAD_INITIALIZER(tx_frag_desc[7]),
341};
342
343/* This contains a list of free tx frag node descriptors */
344static
345TAILQ_HEAD(, tx_desc_node) free_tx_frag_desc[XLR_MAX_CORE] =
346{
347 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[0]),
348 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[1]),
349 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[2]),
350 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[3]),
351 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[4]),
352 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[5]),
353 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[6]),
354 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[7]),
355};
356
357static struct mtx tx_desc_lock[XLR_MAX_CORE];
358
359static inline void
360mac_make_desc_rfr(struct msgrng_msg *msg,
361 vm_paddr_t addr)
362{
363 msg->msg0 = (uint64_t) addr & 0xffffffffe0ULL;
364 msg->msg1 = msg->msg2 = msg->msg3 = 0;
365}
366
367#define MAC_TX_DESC_ALIGNMENT (XLR_CACHELINE_SIZE - 1)
368
369static void
370init_p2d_allocation(void)
371{
372 int active_core[8] = {0};
373 int i = 0;
374 uint32_t cpumask;
375 int cpu;
376
377 cpumask = xlr_hw_thread_mask;
378
379 for (i = 0; i < 32; i++) {
380 if (cpumask & (1 << i)) {
381 cpu = i;
382 if (!active_core[cpu / 4]) {
383 active_core[cpu / 4] = 1;
384 xlr_total_active_core++;
385 }
386 }
387 }
388 for (i = 0; i < XLR_MAX_CORE; i++) {
389 if (active_core[i])
390 xlr_tot_avail_p2d[i] = XLR_MAX_TX_DESC_NODES / xlr_total_active_core;
391 }
392 printf("Total Active Core %d\n", xlr_total_active_core);
393}
394
395
/*
 * Allocate the physically contiguous TX p2d descriptor ring and
 * distribute the descriptor nodes across the per-core free lists.
 * Called once at driver init; panics if the ring cannot be allocated.
 */
static void
init_tx_ring(void)
{
	int i;
	int j = 0;
	struct tx_desc_node *start, *node;
	struct p2d_tx_desc *tx_desc;
	vm_paddr_t paddr;
	vm_offset_t unmapped_addr;

	for (i = 0; i < XLR_MAX_CORE; i++)
		mtx_init(&tx_desc_lock[i], "xlr tx_desc", NULL, MTX_SPIN);

	start = &tx_desc_nodes[0];
	/* TODO: try to get this from KSEG0 */
	/* Contiguous, cacheline-aligned allocation below 256MB physical. */
	xlr_tx_ring_mem = contigmalloc((MAX_TX_RING_SIZE + XLR_CACHELINE_SIZE),
	    M_DEVBUF, M_NOWAIT | M_ZERO, 0,
	    0x10000000, XLR_CACHELINE_SIZE, 0);

	if (xlr_tx_ring_mem == NULL) {
		panic("TX ring memory allocation failed");
	}
	paddr = vtophys((vm_offset_t)xlr_tx_ring_mem);

	/* Access the ring through the unmapped cached KSEG0 window. */
	unmapped_addr = MIPS_PHYS_TO_KSEG0(paddr);


	tx_desc = (struct p2d_tx_desc *)unmapped_addr;

	/*
	 * Walk the descriptor array, handing node i to core list j; j only
	 * advances after a core's full share has been inserted, so each
	 * active core gets a contiguous range of descriptors.
	 */
	for (i = 0; i < XLR_MAX_TX_DESC_NODES; i++) {
		node = start + i;
		node->ptr = tx_desc;
		tx_desc++;
		TAILQ_INSERT_HEAD(&tx_frag_desc[j], node, list);
		j = (i / (XLR_MAX_TX_DESC_NODES / xlr_total_active_core));
	}
}
433
434static inline struct p2d_tx_desc *
435get_p2d_desc(void)
436{
437 struct tx_desc_node *node;
438 struct p2d_tx_desc *tx_desc = NULL;
439 int cpu = xlr_core_id();
440
441 mtx_lock_spin(&tx_desc_lock[cpu]);
442 node = TAILQ_FIRST(&tx_frag_desc[cpu]);
443 if (node) {
444 xlr_tot_avail_p2d[cpu]--;
445 TAILQ_REMOVE(&tx_frag_desc[cpu], node, list);
446 tx_desc = node->ptr;
447 TAILQ_INSERT_HEAD(&free_tx_frag_desc[cpu], node, list);
448 } else {
449 /* Increment p2d desc fail count */
450 get_p2d_desc_failed++;
451 }
452 mtx_unlock_spin(&tx_desc_lock[cpu]);
453 return tx_desc;
454}
455static void
456free_p2d_desc(struct p2d_tx_desc *tx_desc)
457{
458 struct tx_desc_node *node;
459 int cpu = xlr_core_id();
460
461 mtx_lock_spin(&tx_desc_lock[cpu]);
462 node = TAILQ_FIRST(&free_tx_frag_desc[cpu]);
463 KASSERT((node != NULL), ("Free TX frag node list is empty\n"));
464
465 TAILQ_REMOVE(&free_tx_frag_desc[cpu], node, list);
466 node->ptr = tx_desc;
467 TAILQ_INSERT_HEAD(&tx_frag_desc[cpu], node, list);
468 xlr_tot_avail_p2d[cpu]++;
469 mtx_unlock_spin(&tx_desc_lock[cpu]);
470
471}
472
/*
 * Translate an mbuf chain into the P2D fragment descriptor the MAC
 * consumes, and fill in the p2p message that points at it.
 *
 * Each nonempty mbuf contributes one fragment, or two when its buffer
 * crosses a page boundary.  The final entry carries the EOP bit and
 * the free-back station id so the hardware returns the descriptor
 * after transmit.  Returns 0 on success, 1 on failure (no descriptor,
 * or too many fragments; the descriptor is released in that case).
 */
static int
build_frag_list(struct mbuf *m_head, struct msgrng_msg *p2p_msg, struct p2d_tx_desc *tx_desc)
{
	struct mbuf *m;
	vm_paddr_t paddr;
	uint64_t p2d_len;
	int nfrag;
	vm_paddr_t p1, p2;
	uint32_t len1, len2;
	vm_offset_t taddr;
	uint64_t fr_stid;

	/* Station id the hardware sends the completed descriptor back to. */
	fr_stid = (xlr_core_id() << 3) + xlr_thr_id() + 4;

	if (tx_desc == NULL)
		return 1;

	nfrag = 0;
	for (m = m_head; m != NULL; m = m->m_next) {
		/* keep one slot free for the trailing free-back entry */
		if ((nfrag + 1) >= XLR_MAX_TX_FRAGS) {
			free_p2d_desc(tx_desc);
			return 1;
		}
		if (m->m_len != 0) {
			paddr = vtophys(mtod(m, vm_offset_t));
			p1 = paddr + m->m_len;
			p2 = vtophys(((vm_offset_t)m->m_data + m->m_len));
			if (p1 != p2) {
				/*
				 * The buffer is not physically contiguous:
				 * split it into two fragments at the page
				 * boundary.
				 */
				len1 = (uint32_t)
				    (PAGE_SIZE - (paddr & PAGE_MASK));
				tx_desc->frag[nfrag] = (127ULL << 54) |
				    ((uint64_t) len1 << 40) | paddr;
				nfrag++;
				taddr = (vm_offset_t)m->m_data + len1;
				p2 = vtophys(taddr);
				len2 = m->m_len - len1;
				if (len2 == 0)
					continue;
				if (nfrag >= XLR_MAX_TX_FRAGS)
					panic("TX frags exceeded");

				tx_desc->frag[nfrag] = (127ULL << 54) |
				    ((uint64_t) len2 << 40) | p2;

				taddr += len2;
				p1 = vtophys(taddr);

				/* the remainder must itself be contiguous */
				if ((p2 + len2) != p1) {
					printf("p1 = %p p2 = %p\n", (void *)p1, (void *)p2);
					printf("len1 = %x len2 = %x\n", len1,
					    len2);
					printf("m_data %p\n", m->m_data);
					DELAY(1000000);
					panic("Multiple Mbuf segment discontiguous\n");
				}
			} else {
				tx_desc->frag[nfrag] = (127ULL << 54) |
				    ((uint64_t) m->m_len << 40) | paddr;
			}
			nfrag++;
		}
	}
	/* set eop in the last tx p2d desc */
	tx_desc->frag[nfrag - 1] |= (1ULL << 63);
	paddr = vtophys((vm_offset_t)tx_desc);
	tx_desc->frag[nfrag] = (1ULL << 63) | (fr_stid << 54) | paddr;
	nfrag++;
	/* stash back-pointers consumed by release_tx_desc() */
	tx_desc->frag[XLR_MAX_TX_FRAGS] = (uint64_t)(intptr_t)tx_desc;
	tx_desc->frag[XLR_MAX_TX_FRAGS + 1] = (uint64_t)(intptr_t)m_head;

	p2d_len = (nfrag * 8);
	p2p_msg->msg0 = (1ULL << 63) | (1ULL << 62) | (127ULL << 54) |
	    (p2d_len << 40) | paddr;

	return 0;
}
549static void
550release_tx_desc(struct msgrng_msg *msg, int rel_buf)
551{
552 struct p2d_tx_desc *tx_desc, *chk_addr;
553 struct mbuf *m;
554
555 tx_desc = (struct p2d_tx_desc *)MIPS_PHYS_TO_KSEG0(msg->msg0);
556 chk_addr = (struct p2d_tx_desc *)(intptr_t)tx_desc->frag[XLR_MAX_TX_FRAGS];
557 if (tx_desc != chk_addr) {
558 printf("Address %p does not match with stored addr %p - we leaked a descriptor\n",
559 tx_desc, chk_addr);
560 return;
561 }
562 if (rel_buf) {
563 m = (struct mbuf *)(intptr_t)tx_desc->frag[XLR_MAX_TX_FRAGS + 1];
564 m_freem(m);
565 }
566 free_p2d_desc(tx_desc);
567}
568
569
570static struct mbuf *
571get_mbuf(void)
572{
573 struct mbuf *m_new = NULL;
574
575 if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
576 return NULL;
577
578 m_new->m_len = MCLBYTES;
579 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
580 return m_new;
581}
582
583static void
584free_buf(vm_paddr_t paddr)
585{
586 struct mbuf *m;
587 uint64_t mag;
588 uint32_t sr;
589
590 sr = xlr_enable_kx();
591 m = (struct mbuf *)(intptr_t)xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
592 mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
593 xlr_restore_kx(sr);
594 if (mag != 0xf00bad) {
595 printf("Something is wrong kseg:%lx found mag:%lx not 0xf00bad\n",
596 (u_long)paddr, (u_long)mag);
597 return;
598 }
599 if (m != NULL)
600 m_freem(m);
601}
602
/*
 * Allocate a receive buffer for the MAC free-in ring.
 *
 * Layout: one cacheline of metadata (back-pointer to the owning mbuf
 * plus a 0xf00bad magic word, both verified later by free_buf())
 * sits immediately before the data area handed to the hardware.
 * Returns a pointer to the data area, or NULL on allocation failure.
 */
static void *
get_buf(void)
{
	struct mbuf *m_new = NULL;
	uint64_t *md;
#ifdef INVARIANTS
	vm_paddr_t temp1, temp2;
#endif

	m_new = get_mbuf();
	if (m_new == NULL)
		return NULL;

	/* align m_data up to the next cacheline boundary */
	m_adj(m_new, XLR_CACHELINE_SIZE - ((uintptr_t)m_new->m_data & 0x1f));
	md = (uint64_t *)m_new->m_data;
	md[0] = (uintptr_t)m_new;	/* Back Ptr */
	md[1] = 0xf00bad;
	/* advance past the metadata cacheline; hardware only sees data */
	m_adj(m_new, XLR_CACHELINE_SIZE);

#ifdef INVARIANTS
	/* the MAC requires physically contiguous receive buffers */
	temp1 = vtophys((vm_offset_t)m_new->m_data);
	temp2 = vtophys((vm_offset_t)m_new->m_data + 1536);
	if ((temp1 + 1536) != temp2)
		panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
#endif
	return (void *)m_new->m_data;
}
630
631/**********************************************************************
632 **********************************************************************/
633static void
634rmi_xlr_mac_set_enable(struct driver_data *priv, int flag)
635{
636 uint32_t regval;
637 int tx_threshold = 1518;
638
639 if (flag) {
640 regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
641 regval |= (1 << O_TX_CONTROL__TxEnable) |
642 (tx_threshold << O_TX_CONTROL__TxThreshold);
643
644 xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);
645
646 regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
647 regval |= 1 << O_RX_CONTROL__RxEnable;
648 if (priv->mode == XLR_PORT0_RGMII)
649 regval |= 1 << O_RX_CONTROL__RGMII;
650 xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);
651
652 regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
653 regval |= (O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
654 xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
655 } else {
656 regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
657 regval &= ~((1 << O_TX_CONTROL__TxEnable) |
658 (tx_threshold << O_TX_CONTROL__TxThreshold));
659
660 xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);
661
662 regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
663 regval &= ~(1 << O_RX_CONTROL__RxEnable);
664 xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);
665
666 regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
667 regval &= ~(O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
668 xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
669 }
670}
671
672/**********************************************************************
673 **********************************************************************/
674static __inline__ int
675xlr_mac_send_fr(struct driver_data *priv,
676 vm_paddr_t addr, int len)
677{
678 struct msgrng_msg msg;
679 int stid = priv->rfrbucket;
680 int code, ret;
681 uint32_t msgrng_flags;
682#ifdef INVARIANTS
683 int i = 0;
684#endif
685
686 mac_make_desc_rfr(&msg, addr);
687
688 /* Send the packet to MAC */
689 dbg_msg("mac_%d: Sending free packet %lx to stid %d\n",
690 priv->instance, (u_long)addr, stid);
691 if (priv->type == XLR_XGMAC)
692 code = MSGRNG_CODE_XGMAC; /* WHY? */
693 else
694 code = MSGRNG_CODE_MAC;
695
696 do {
697 msgrng_flags = msgrng_access_enable();
698 ret = message_send(1, code, stid, &msg);
699 msgrng_restore(msgrng_flags);
700 KASSERT(i++ < 100000, ("Too many credit fails\n"));
701 } while (ret != 0);
702
703 return 0;
704}
705
706/**************************************************************/
707
/*
 * Take the four XGMAC MDIO devices (addresses 0x8000..0x8003 on
 * device 1) out of isolation by clearing bit 13 of each control
 * register.
 */
static void
xgmac_mdio_setup(volatile unsigned int *_mmio)
{
	int lane;
	uint32_t val;

	for (lane = 0; lane < 4; lane++) {
		val = xmdio_read(_mmio, 1, 0x8000 + lane);
		val &= 0xffffdfff;	/* clear isolate bit */
		xmdio_write(_mmio, 1, 0x8000 + lane, val);
	}
}
720
721/**********************************************************************
722 * Init MII interface
723 *
724 * Input parameters:
725 * s - priv structure
726 ********************************************************************* */
727#define PHY_STATUS_RETRIES 25000
728
729static void
730rmi_xlr_mac_mii_init(struct driver_data *priv)
731{
732 xlr_reg_t *mii_mmio = priv->mii_mmio;
733
734 /* use the lowest clock divisor - divisor 28 */
735 xlr_write_reg(mii_mmio, R_MII_MGMT_CONFIG, 0x07);
736}
737
738/**********************************************************************
739 * Read a PHY register.
740 *
741 * Input parameters:
742 * s - priv structure
743 * phyaddr - PHY's address
744 * regidx = index of register to read
745 *
746 * Return value:
 * value read, or 0xffffffff if the read cycle timed out.
748 ********************************************************************* */
749
750static int
751rge_mii_read_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx)
752{
753 int i = 0;
754
755 /* setup the phy reg to be used */
756 xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
757 (phyaddr << 8) | (regidx << 0));
758 /* Issue the read command */
759 xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND,
760 (1 << O_MII_MGMT_COMMAND__rstat));
761
762 /* poll for the read cycle to complete */
763 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
764 if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
765 break;
766 }
767
768 /* clear the read cycle */
769 xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND, 0);
770
771 if (i == PHY_STATUS_RETRIES) {
772 return 0xffffffff;
773 }
774 /* Read the data back */
775 return xlr_read_reg(mii_mmio, R_MII_MGMT_STATUS);
776}
777
778static int
779rge_mii_read(device_t dev, int phyaddr, int regidx)
780{
781 struct rge_softc *sc = device_get_softc(dev);
782
783 return rge_mii_read_internal(sc->priv.mii_mmio, phyaddr, regidx);
784}
785
786/**********************************************************************
787 * Set MII hooks to newly selected media
788 *
789 * Input parameters:
790 * ifp - Interface Pointer
791 *
792 * Return value:
793 * nothing
794 ********************************************************************* */
795static int
796rmi_xlr_mac_mediachange(struct ifnet *ifp)
797{
798 struct rge_softc *sc = ifp->if_softc;
799
800 if (ifp->if_flags & IFF_UP)
801 mii_mediachg(&sc->rge_mii);
802
803 return 0;
804}
805
806/**********************************************************************
807 * Get the current interface media status
808 *
809 * Input parameters:
810 * ifp - Interface Pointer
811 * ifmr - Interface media request ptr
812 *
813 * Return value:
814 * nothing
815 ********************************************************************* */
/*
 * ifmedia status callback: report link validity/activity based on
 * the driver's cached link state.
 */
static void
rmi_xlr_mac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;

	/* Check whether this is interface is active or not. */
	ifmr->ifm_status = IFM_AVALID;
	if (sc->link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
	} else {
		/*
		 * NOTE(review): ifm_active is only written on the
		 * link-down path; when the link is up it is left
		 * untouched here.  Confirm this asymmetry is intended.
		 */
		ifmr->ifm_active = IFM_ETHER;
	}
}
829
830/**********************************************************************
831 * Write a value to a PHY register.
832 *
833 * Input parameters:
834 * s - priv structure
835 * phyaddr - PHY to use
836 * regidx - register within the PHY
837 * regval - data to write to register
838 *
839 * Return value:
840 * nothing
841 ********************************************************************* */
842static void
843rge_mii_write_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx, int regval)
844{
845 int i = 0;
846
847 xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
848 (phyaddr << 8) | (regidx << 0));
849
850 /* Write the data which starts the write cycle */
851 xlr_write_reg(mii_mmio, R_MII_MGMT_WRITE_DATA, regval);
852
853 /* poll for the write cycle to complete */
854 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
855 if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
856 break;
857 }
858
859 return;
860}
861
862static int
863rge_mii_write(device_t dev, int phyaddr, int regidx, int regval)
864{
865 struct rge_softc *sc = device_get_softc(dev);
866
867 rge_mii_write_internal(sc->priv.mii_mmio, phyaddr, regidx, regval);
868 return (0);
869}
870
/*
 * miibus status-change callback.  Intentionally empty: this driver
 * reads link/speed state directly from the PHY elsewhere (see
 * rmi_xlr_gmac_config_speed) rather than reacting to this event.
 */
static void
rmi_xlr_mac_mii_statchg(struct device *dev)
{
}
875
876static void
877serdes_regs_init(struct driver_data *priv)
878{
879 xlr_reg_t *mmio_gpio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GPIO_OFFSET);
880
881 /* Initialize SERDES CONTROL Registers */
882 rge_mii_write_internal(priv->serdes_mmio, 26, 0, 0x6DB0);
883 rge_mii_write_internal(priv->serdes_mmio, 26, 1, 0xFFFF);
884 rge_mii_write_internal(priv->serdes_mmio, 26, 2, 0xB6D0);
885 rge_mii_write_internal(priv->serdes_mmio, 26, 3, 0x00FF);
886 rge_mii_write_internal(priv->serdes_mmio, 26, 4, 0x0000);
887 rge_mii_write_internal(priv->serdes_mmio, 26, 5, 0x0000);
888 rge_mii_write_internal(priv->serdes_mmio, 26, 6, 0x0005);
889 rge_mii_write_internal(priv->serdes_mmio, 26, 7, 0x0001);
890 rge_mii_write_internal(priv->serdes_mmio, 26, 8, 0x0000);
891 rge_mii_write_internal(priv->serdes_mmio, 26, 9, 0x0000);
892 rge_mii_write_internal(priv->serdes_mmio, 26, 10, 0x0000);
893
894 /*
895 * GPIO setting which affect the serdes - needs figuring out
896 */
897 DELAY(100);
898 xlr_write_reg(mmio_gpio, 0x20, 0x7e6802);
899 xlr_write_reg(mmio_gpio, 0x10, 0x7104);
900 DELAY(100);
901
902 /*
903 * This kludge is needed to setup serdes (?) clock correctly on some
904 * XLS boards
905 */
906 if ((xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI ||
907 xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XII) &&
908 xlr_boot1_info.board_minor_version == 4) {
909 /* use 125 Mhz instead of 156.25Mhz ref clock */
910 DELAY(100);
911 xlr_write_reg(mmio_gpio, 0x10, 0x7103);
912 xlr_write_reg(mmio_gpio, 0x21, 0x7103);
913 DELAY(100);
914 }
915
916 return;
917}
918
919static void
920serdes_autoconfig(struct driver_data *priv)
921{
922 int delay = 100000;
923
924 /* Enable Auto negotiation in the PCS Layer */
925 rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x1000);
926 DELAY(delay);
927 rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x0200);
928 DELAY(delay);
929
930 rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x1000);
931 DELAY(delay);
932 rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x0200);
933 DELAY(delay);
934
935 rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x1000);
936 DELAY(delay);
937 rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x0200);
938 DELAY(delay);
939
940 rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x1000);
941 DELAY(delay);
942 rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x0200);
943 DELAY(delay);
944
945}
946
947/*****************************************************************
948 * Initialize GMAC
949 *****************************************************************/
950static void
951rmi_xlr_config_pde(struct driver_data *priv)
952{
953 int i = 0, cpu = 0, bucket = 0;
954 uint64_t bucket_map = 0;
955
956 /* uint32_t desc_pack_ctrl = 0; */
957 uint32_t cpumask;
958
959 cpumask = 0x1;
960#ifdef SMP
961 /*
962 * rge may be called before SMP start in a BOOTP/NFSROOT
963 * setup. we will distribute packets to other cpus only when
964 * the SMP is started.
965 */
966 if (smp_started)
967 cpumask = xlr_hw_thread_mask;
968#endif
969
970 for (i = 0; i < MAXCPU; i++) {
971 if (cpumask & (1 << i)) {
972 cpu = i;
973 bucket = ((cpu >> 2) << 3);
974 bucket_map |= (1ULL << bucket);
974 bucket_map |= (3ULL << bucket);
975 }
976 }
977 printf("rmi_xlr_config_pde: bucket_map=%jx\n", (uintmax_t)bucket_map);
978
979 /* bucket_map = 0x1; */
980 xlr_write_reg(priv->mmio, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
981 xlr_write_reg(priv->mmio, R_PDE_CLASS_0 + 1,
982 ((bucket_map >> 32) & 0xffffffff));
983
984 xlr_write_reg(priv->mmio, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
985 xlr_write_reg(priv->mmio, R_PDE_CLASS_1 + 1,
986 ((bucket_map >> 32) & 0xffffffff));
987
988 xlr_write_reg(priv->mmio, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
989 xlr_write_reg(priv->mmio, R_PDE_CLASS_2 + 1,
990 ((bucket_map >> 32) & 0xffffffff));
991
992 xlr_write_reg(priv->mmio, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
993 xlr_write_reg(priv->mmio, R_PDE_CLASS_3 + 1,
994 ((bucket_map >> 32) & 0xffffffff));
995}
996
997static void
998rge_smp_update_pde(void *dummy __unused)
999{
1000 int i;
1001 struct driver_data *priv;
1002 struct rge_softc *sc;
1003
1004 printf("Updating packet distribution for SMP\n");
1005 for (i = 0; i < XLR_MAX_MACS; i++) {
1006 sc = dev_mac[i];
1007 if (!sc)
1008 continue;
1009 priv = &(sc->priv);
1010 rmi_xlr_mac_set_enable(priv, 0);
1011 rmi_xlr_config_pde(priv);
1012 rmi_xlr_mac_set_enable(priv, 1);
1013 }
1014}
1015
1016SYSINIT(rge_smp_update_pde, SI_SUB_SMP, SI_ORDER_ANY, rge_smp_update_pde, NULL);
1017
1018
/*
 * Configure the packet parser: L2 type handling and the L3 table
 * entry that extracts IPv4 source, destination and protocol fields.
 */
static void
rmi_xlr_config_parser(struct driver_data *priv)
{
	/*
	 * Mark it as no classification. The parser extract is guaranteed to
	 * be zero with no classification.
	 *
	 * NOTE(review): R_L2TYPE_0 is written twice in a row (0x00 then
	 * 0x01); confirm whether the first write is required by hardware
	 * or is dead code.
	 */
	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x00);

	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x01);

	/* configure the parser : L2 Type is configured in the bootloader */
	/* extract IP: src, dest protocol */
	xlr_write_reg(priv->mmio, R_L3CTABLE,
	    (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
	    (0x0800 << 0));
	xlr_write_reg(priv->mmio, R_L3CTABLE + 1,
	    (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));

}
1039
1040static void
1041rmi_xlr_config_classifier(struct driver_data *priv)
1042{
1043 int i = 0;
1044
1045 if (priv->type == XLR_XGMAC) {
1046 /* xgmac translation table doesn't have sane values on reset */
1047 for (i = 0; i < 64; i++)
1048 xlr_write_reg(priv->mmio, R_TRANSLATETABLE + i, 0x0);
1049
1050 /*
1051 * use upper 7 bits of the parser extract to index the
1052 * translate table
1053 */
1054 xlr_write_reg(priv->mmio, R_PARSERCONFIGREG, 0x0);
1055 }
1056}
1057
1058enum {
1059 SGMII_SPEED_10 = 0x00000000,
1060 SGMII_SPEED_100 = 0x02000000,
1061 SGMII_SPEED_1000 = 0x04000000,
1062};
1063
1064static void
1065rmi_xlr_gmac_config_speed(struct driver_data *priv)
1066{
1067 int phy_addr = priv->phy_addr;
1068 xlr_reg_t *mmio = priv->mmio;
1069 struct rge_softc *sc = priv->sc;
1070
1071 priv->speed = rge_mii_read_internal(priv->mii_mmio, phy_addr, 28);
1072 priv->link = rge_mii_read_internal(priv->mii_mmio, phy_addr, 1) & 0x4;
1073 priv->speed = (priv->speed >> 3) & 0x03;
1074
1075 if (priv->speed == xlr_mac_speed_10) {
1076 if (priv->mode != XLR_RGMII)
1077 xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_10);
1078 xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1079 xlr_write_reg(mmio, R_CORECONTROL, 0x02);
1080 printf("%s: [10Mbps]\n", device_get_nameunit(sc->rge_dev));
1081 sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1082 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1083 sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1084 } else if (priv->speed == xlr_mac_speed_100) {
1085 if (priv->mode != XLR_RGMII)
1086 xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
1087 xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1088 xlr_write_reg(mmio, R_CORECONTROL, 0x01);
1089 printf("%s: [100Mbps]\n", device_get_nameunit(sc->rge_dev));
1090 sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1091 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1092 sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1093 } else {
1094 if (priv->speed != xlr_mac_speed_1000) {
1095 if (priv->mode != XLR_RGMII)
1096 xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
1097 printf("PHY reported unknown MAC speed, defaulting to 100Mbps\n");
1098 xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1099 xlr_write_reg(mmio, R_CORECONTROL, 0x01);
1100 sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1101 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1102 sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1103 } else {
1104 if (priv->mode != XLR_RGMII)
1105 xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_1000);
1106 xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7217);
1107 xlr_write_reg(mmio, R_CORECONTROL, 0x00);
1108 printf("%s: [1000Mbps]\n", device_get_nameunit(sc->rge_dev));
1109 sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1110 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1111 sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1112 }
1113 }
1114
1115 if (!priv->link) {
1116 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER;
1117 sc->link_up = 0;
1118 } else {
1119 sc->link_up = 1;
1120 }
1121}
1122
/*****************************************************************
 * Initialize XGMAC
 *****************************************************************/
/*
 * One-time hardware init for a 10G (XGMAC) port: programs descriptor
 * packing, PDE/parser/classifier, the XGMAC and glue registers,
 * releases the XGMII phy from reset through the board CPLD, and sets
 * up message-ring bucket sizes / credit counters for this XGS station.
 * Media is reported to ifmedia as fixed 10G-SR full duplex.
 */
static void
rmi_xlr_xgmac_init(struct driver_data *priv)
{
	int i = 0;
	xlr_reg_t *mmio = priv->mmio;
	int id = priv->instance;	/* 0 or 1: selects XGS0/XGS1 bucket tables */
	struct rge_softc *sc = priv->sc;
	volatile unsigned short *cpld;

	/* Board CPLD registers at a fixed KSEG1 (uncached) address. */
	cpld = (volatile unsigned short *)0xBD840000;

	xlr_write_reg(priv->mmio, R_DESC_PACK_CTRL,
	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize) | (4 << 20));
	xlr_write_reg(priv->mmio, R_BYTEOFFSET0, BYTE_OFFSET);
	rmi_xlr_config_pde(priv);
	rmi_xlr_config_parser(priv);
	rmi_xlr_config_classifier(priv);

	xlr_write_reg(priv->mmio, R_MSG_TX_THRESHOLD, 1);

	/* configure the XGMAC Registers */
	xlr_write_reg(mmio, R_XGMAC_CONFIG_1, 0x50000026);

	/* configure the XGMAC_GLUE Registers */
	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
	xlr_write_reg(mmio, R_STATCTRL, 0x04);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);

	xlr_write_reg(mmio, R_XGMACPADCALIBRATION, 0x030);
	xlr_write_reg(mmio, R_EGRESSFIFOCARVINGSLOTS, 0x0f);
	/* NOTE(review): R_L2ALLOCCTRL is also written above — redundant but harmless. */
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
	xlr_write_reg(mmio, R_XGMAC_MIIM_CONFIG, 0x3e);

	/*
	 * take XGMII phy out of reset
	 */
	/*
	 * we are pulling everything out of reset because writing a 0 would
	 * reset other devices on the chip
	 */
	cpld[ATX_CPLD_RESET_1] = 0xffff;
	cpld[ATX_CPLD_MISC_CTRL] = 0xffff;
	cpld[ATX_CPLD_RESET_2] = 0xffff;

	xgmac_mdio_setup(mmio);

	rmi_xlr_config_spill_area(priv);

	if (id == 0) {
		/* XGS0: 16 tx buckets, jumbo/regular free-in buckets, credits. */
		for (i = 0; i < 16; i++) {
			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
			    bucket_sizes.
			    bucket[MSGRNG_STNID_XGS0_TX + i]);
		}

		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0JFR]);
		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0RFR]);

		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
			    cc_table_xgs_0.
			    counters[i >> 3][i & 0x07]);
		}
	} else if (id == 1) {
		/* XGS1: same layout with the second station's IDs/credits. */
		for (i = 0; i < 16; i++) {
			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
			    bucket_sizes.
			    bucket[MSGRNG_STNID_XGS1_TX + i]);
		}

		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1JFR]);
		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1RFR]);

		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
			    cc_table_xgs_1.
			    counters[i >> 3][i & 0x07]);
		}
	}
	/* Link is fixed 10G: report 10G-SR FDX, valid and active. */
	sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media.ifm_media |= (IFM_AVALID | IFM_ACTIVE);
	sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media.ifm_cur->ifm_media |= (IFM_AVALID | IFM_ACTIVE);

	/* Ask the rx path to seed the free-in descriptors on first use. */
	priv->init_frin_desc = 1;
}
1220
1221/*******************************************************
1222 * Initialization gmac
1223 *******************************************************/
1224static int
1225rmi_xlr_gmac_reset(struct driver_data *priv)
1226{
1227 volatile uint32_t val;
1228 xlr_reg_t *mmio = priv->mmio;
1229 int i, maxloops = 100;
1230
1231 /* Disable MAC RX */
1232 val = xlr_read_reg(mmio, R_MAC_CONFIG_1);
1233 val &= ~0x4;
1234 xlr_write_reg(mmio, R_MAC_CONFIG_1, val);
1235
1236 /* Disable Core RX */
1237 val = xlr_read_reg(mmio, R_RX_CONTROL);
1238 val &= ~0x1;
1239 xlr_write_reg(mmio, R_RX_CONTROL, val);
1240
1241 /* wait for rx to halt */
1242 for (i = 0; i < maxloops; i++) {
1243 val = xlr_read_reg(mmio, R_RX_CONTROL);
1244 if (val & 0x2)
1245 break;
1246 DELAY(1000);
1247 }
1248 if (i == maxloops)
1249 return -1;
1250
1251 /* Issue a soft reset */
1252 val = xlr_read_reg(mmio, R_RX_CONTROL);
1253 val |= 0x4;
1254 xlr_write_reg(mmio, R_RX_CONTROL, val);
1255
1256 /* wait for reset to complete */
1257 for (i = 0; i < maxloops; i++) {
1258 val = xlr_read_reg(mmio, R_RX_CONTROL);
1259 if (val & 0x8)
1260 break;
1261 DELAY(1000);
1262 }
1263 if (i == maxloops)
1264 return -1;
1265
1266 /* Clear the soft reset bit */
1267 val = xlr_read_reg(mmio, R_RX_CONTROL);
1268 val &= ~0x4;
1269 xlr_write_reg(mmio, R_RX_CONTROL, val);
1270 return 0;
1271}
1272
/*
 * One-time hardware init for a 1G (GMAC) port: spill area, descriptor
 * packing, PDE/parser/classifier, MII and (non-RGMII) SerDes setup,
 * link-speed configuration, DMA/statistics registers and message-ring
 * bucket sizes / credit counters.  The ordering of register writes
 * follows the hardware bring-up sequence; do not reorder.
 */
static void
rmi_xlr_gmac_init(struct driver_data *priv)
{
	int i = 0;
	xlr_reg_t *mmio = priv->mmio;
	int id = priv->instance;
	struct stn_cc *gmac_cc_config;
	uint32_t value = 0;
	int blk = id / 4, port = id % 4;	/* 4 ports per GMAC block */

	rmi_xlr_mac_set_enable(priv, 0);

	rmi_xlr_config_spill_area(priv);

	xlr_write_reg(mmio, R_DESC_PACK_CTRL,
	    (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset) |
	    (1 << O_DESC_PACK_CTRL__MaxEntry) |
	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));

	rmi_xlr_config_pde(priv);
	rmi_xlr_config_parser(priv);
	rmi_xlr_config_classifier(priv);

	xlr_write_reg(mmio, R_MSG_TX_THRESHOLD, 3);
	xlr_write_reg(mmio, R_MAC_CONFIG_1, 0x35);
	xlr_write_reg(mmio, R_RX_CONTROL, (0x7 << 6));

	if (priv->mode == XLR_PORT0_RGMII) {
		/* Port 0 can be strapped to RGMII; flag it in RX_CONTROL. */
		printf("Port 0 set in RGMII mode\n");
		value = xlr_read_reg(mmio, R_RX_CONTROL);
		value |= 1 << O_RX_CONTROL__RGMII;
		xlr_write_reg(mmio, R_RX_CONTROL, value);
	}
	rmi_xlr_mac_mii_init(priv);


#if 0
	priv->advertising = ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half |
	    ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half |
	    ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
	    ADVERTISED_MII;
#endif

	/*
	 * Enable all MDIO interrupts in the phy RX_ER bit seems to be get
	 * set about every 1 sec in GigE mode, ignore it for now...
	 */
	rge_mii_write_internal(priv->mii_mmio, priv->phy_addr, 25, 0xfffffffe);

	/* SGMII ports need the SerDes brought up before speed config. */
	if (priv->mode != XLR_RGMII) {
		serdes_regs_init(priv);
		serdes_autoconfig(priv);
	}
	rmi_xlr_gmac_config_speed(priv);

	/* Set back-to-back inter-packet gap, keep the other IFG fields. */
	value = xlr_read_reg(mmio, R_IPG_IFG);
	xlr_write_reg(mmio, R_IPG_IFG, ((value & ~0x7f) | MAC_B2B_IPG));
	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
	xlr_write_reg(mmio, R_STATCTRL, 0x04);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
	xlr_write_reg(mmio, R_INTMASK, 0);
	xlr_write_reg(mmio, R_FREEQCARVE, 0);

	/* Message-ring bucket sizes: this port's tx bucket plus the
	 * shared jumbo/regular free-in buckets for both GMAC blocks. */
	xlr_write_reg(mmio, R_GMAC_TX0_BUCKET_SIZE + port,
	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
	xlr_write_reg(mmio, R_GMAC_JFR0_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
	xlr_write_reg(mmio, R_GMAC_RFR0_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
	xlr_write_reg(mmio, R_GMAC_JFR1_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
	xlr_write_reg(mmio, R_GMAC_RFR1_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);

	dbg_msg("Programming credit counter %d : %d -> %d\n", blk, R_GMAC_TX0_BUCKET_SIZE + port,
	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);

	/* Per-block credit counters, 8 per CPU register. */
	gmac_cc_config = xlr_board_info.gmac_block[blk].credit_config;
	for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
		xlr_write_reg(mmio, R_CC_CPU0_0 + i,
		    gmac_cc_config->counters[i >> 3][i & 0x07]);
		dbg_msg("%d: %d -> %d\n", priv->instance,
		    R_CC_CPU0_0 + i, gmac_cc_config->counters[i >> 3][i & 0x07]);
	}
	/* Ask the rx path to seed the free-in descriptors on first use. */
	priv->init_frin_desc = 1;
}
1362
1363/**********************************************************************
1364 * Set promiscuous mode
1365 **********************************************************************/
1366static void
1367xlr_mac_set_rx_mode(struct rge_softc *sc)
1368{
1369 struct driver_data *priv = &(sc->priv);
1370 uint32_t regval;
1371
1372 regval = xlr_read_reg(priv->mmio, R_MAC_FILTER_CONFIG);
1373
1374 if (sc->flags & IFF_PROMISC) {
1375 regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1376 (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1377 (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1378 (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
1379 } else {
1380 regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1381 (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
1382 }
1383
1384 xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG, regval);
1385}
1386
1387/**********************************************************************
1388 * Configure LAN speed for the specified MAC.
1389 ********************************************************************* */
/*
 * Set the LAN speed for a MAC.  Currently a no-op: speed is actually
 * programmed by rmi_xlr_gmac_config_speed() from PHY status.
 */
static int
rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed)
{
	return 0;
}
1395
1396/**********************************************************************
1397 * Set Ethernet duplex and flow control options for this MAC
1398 ********************************************************************* */
/*
 * Set duplex and flow-control options for a MAC.  Currently a no-op;
 * always reports success.
 */
static int
rmi_xlr_mac_set_duplex(struct driver_data *s,
    xlr_mac_duplex_t duplex, xlr_mac_fc_t fc)
{
	return 0;
}
1405
1406/*****************************************************************
1407 * Kernel Net Stack <-> MAC Driver Interface
1408 *****************************************************************/
1409/**********************************************************************
1410 **********************************************************************/
/* mac_xmit()/rmi_xlr_mac_xmit() return codes. */
#define MAC_TX_FAIL 2		/* send failed; packet was not queued */
#define MAC_TX_PASS 0		/* packet handed to the MAC */
#define MAC_TX_RETRY 1		/* transient failure; caller retries */

/* NOTE(review): not referenced in the visible code — confirm before removing. */
int xlr_dev_queue_xmit_hack = 0;
1416
1417static int
1418mac_xmit(struct mbuf *m, struct rge_softc *sc,
1419 struct driver_data *priv, int len, struct p2d_tx_desc *tx_desc)
1420{
1421 struct msgrng_msg msg = {0,0,0,0};
1422 int stid = priv->txbucket;
1423 uint32_t tx_cycles = 0;
1424 uint32_t mflags;
1425 int vcpu = xlr_cpu_id();
1426 int rv;
1427
1428 tx_cycles = mips_rd_count();
1429
1430 if (build_frag_list(m, &msg, tx_desc) != 0)
1431 return MAC_TX_FAIL;
1432
1433 else {
1434 mflags = msgrng_access_enable();
1435 if ((rv = message_send(1, MSGRNG_CODE_MAC, stid, &msg)) != 0) {
1436 msg_snd_failed++;
1437 msgrng_restore(mflags);
1438 release_tx_desc(&msg, 0);
1439 xlr_rge_msg_snd_failed[vcpu]++;
1440 dbg_msg("Failed packet to cpu %d, rv = %d, stid %d, msg0=%jx\n",
1441 vcpu, rv, stid, (uintmax_t)msg.msg0);
1442 return MAC_TX_FAIL;
1443 }
1444 msgrng_restore(mflags);
1445 port_inc_counter(priv->instance, PORT_TX);
1446 }
1447
1448 /* Send the packet to MAC */
1449 dbg_msg("Sent tx packet to stid %d, msg0=%jx, msg1=%jx \n", stid,
1450 (uintmax_t)msg.msg0, (uintmax_t)msg.msg1);
1451#ifdef DUMP_PACKETS
1452 {
1453 int i = 0;
1454 unsigned char *buf = (char *)m->m_data;
1455
1456 printf("Tx Packet: length=%d\n", len);
1457 for (i = 0; i < 64; i++) {
1458 if (i && (i % 16) == 0)
1459 printf("\n");
1460 printf("%02x ", buf[i]);
1461 }
1462 printf("\n");
1463 }
1464#endif
1465 xlr_inc_counter(NETIF_TX);
1466 return MAC_TX_PASS;
1467}
1468
1469static int
1470rmi_xlr_mac_xmit(struct mbuf *m, struct rge_softc *sc, int len, struct p2d_tx_desc *tx_desc)
1471{
1472 struct driver_data *priv = &(sc->priv);
1473 int ret = -ENOSPC;
1474
1475 dbg_msg("IN\n");
1476
1477 xlr_inc_counter(NETIF_STACK_TX);
1478
1479retry:
1480 ret = mac_xmit(m, sc, priv, len, tx_desc);
1481
1482 if (ret == MAC_TX_RETRY)
1483 goto retry;
1484
1485 dbg_msg("OUT, ret = %d\n", ret);
1486 if (ret == MAC_TX_FAIL) {
1487 /* FULL */
1488 dbg_msg("Msg Ring Full. Stopping upper layer Q\n");
1489 port_inc_counter(priv->instance, PORT_STOPQ);
1490 }
1491 return ret;
1492}
1493
/*
 * Replenish free-in (rx buffer) descriptors for every MAC on behalf
 * of this core.  Loops until each MAC's per-core frin_to_be_sent
 * counter has drained, allocating a buffer and pushing it to the MAC
 * through the message ring for each outstanding request.
 */
static void
mac_frin_replenish(void *args /* ignored */ )
{
	int cpu = xlr_core_id();
	int done = 0;
	int i = 0;

	xlr_inc_counter(REPLENISH_ENTER);
	/*
	 * xlr_set_counter(REPLENISH_ENTER_COUNT,
	 * atomic_read(frin_to_be_sent));
	 */
	xlr_set_counter(REPLENISH_CPU, PCPU_GET(cpuid));

	/* Outer loop: keep sweeping the MACs until all are caught up. */
	for (;;) {

		done = 0;

		for (i = 0; i < XLR_MAX_MACS; i++) {
			/* int offset = 0; */
			void *m;
			uint32_t cycles;
			struct rge_softc *sc;
			struct driver_data *priv;
			int frin_to_be_sent;

			sc = dev_mac[i];
			if (!sc)
				goto skip;

			priv = &(sc->priv);
			frin_to_be_sent = priv->frin_to_be_sent[cpu];

			/* if (atomic_read(frin_to_be_sent) < 0) */
			if (frin_to_be_sent < 0) {
				panic("BUG?: [%s]: gmac_%d illegal value for frin_to_be_sent=%d\n",
				    __FUNCTION__, i,
				    frin_to_be_sent);
			}
			/* if (!atomic_read(frin_to_be_sent)) */
			if (!frin_to_be_sent)
				goto skip;

			cycles = mips_rd_count();
			{
				m = get_buf();
				if (!m) {
					/* Allocation failed; try this MAC again next sweep. */
					device_printf(sc->rge_dev, "No buffer\n");
					goto skip;
				}
			}
			xlr_inc_counter(REPLENISH_FRIN);
			/* Push the buffer's physical address to the MAC's free-in bucket. */
			if (xlr_mac_send_fr(priv, vtophys(m), MAX_FRAME_SIZE)) {
				free_buf(vtophys(m));
				printf("[%s]: rx free message_send failed!\n", __FUNCTION__);
				break;
			}
			xlr_set_counter(REPLENISH_CYCLES,
			    (read_c0_count() - cycles));
			atomic_subtract_int((&priv->frin_to_be_sent[cpu]), 1);

			continue;
	skip:
			done++;
		}
		/* All MACs reported nothing to do: replenish complete. */
		if (done == XLR_MAX_MACS)
			break;
	}
}
1563
/* Set (cmpset 0->1) while a tx-wakeup pass is pending; cleared by rge_tx_bkp_func(). */
static volatile uint32_t g_tx_frm_tx_ok=0;
1565
1566static void
1567rge_tx_bkp_func(void *arg, int npending)
1568{
1569 int i = 0;
1570
1571 for (i = 0; i < xlr_board_info.gmacports; i++) {
1572 if (!dev_mac[i] || !dev_mac[i]->active)
1573 continue;
1574 rge_start_locked(dev_mac[i]->rge_ifp, RGE_TX_THRESHOLD);
1575 }
1576 atomic_subtract_int(&g_tx_frm_tx_ok, 1);
1577}
1578
/* This function is called from an interrupt handler */
/*
 * Message-ring callback for MAC stations.  Decodes msg0 into a
 * physical address, length and port, then dispatches: a zero-length
 * message is a freed tx descriptor coming back (release it and
 * restart the queue); otherwise it is a received frame (hand it to
 * rge_rx() and schedule free-in replenish if needed).
 */
void
rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
    int stid, struct msgrng_msg *msg,
    void *data /* ignored */ )
{
	uint64_t phys_addr = 0;
	unsigned long addr = 0;
	uint32_t length = 0;
	int ctrl = 0, port = 0;
	struct rge_softc *sc = NULL;
	struct driver_data *priv = 0;
	struct ifnet *ifp;
	int vcpu = xlr_cpu_id();
	int cpu = xlr_core_id();

	dbg_msg("mac: bucket=%d, size=%d, code=%d, stid=%d, msg0=%jx msg1=%jx\n",
	    bucket, size, code, stid, (uintmax_t)msg->msg0, (uintmax_t)msg->msg1);

	/* msg0 layout: bits [39:5] buffer address, [53:40] length, port in
	 * [57:54] for tx-free or [3:0] for rx. */
	phys_addr = (uint64_t) (msg->msg0 & 0xffffffffe0ULL);
	length = (msg->msg0 >> 40) & 0x3fff;
	if (length == 0) {
		/* Zero length: a tx descriptor returned by the MAC. */
		ctrl = CTRL_REG_FREE;
		port = (msg->msg0 >> 54) & 0x0f;
		addr = 0;
	} else {
		/* Received frame; strip the prepad and CRC from the length. */
		ctrl = CTRL_SNGL;
		length = length - BYTE_OFFSET - MAC_CRC_LEN;
		port = msg->msg0 & 0x0f;
		addr = 0;
	}

	/* Map station id + port to the owning softc. */
	if (xlr_board_info.is_xls) {
		if (stid == MSGRNG_STNID_GMAC1)
			port += 4;
		sc = dev_mac[dev_mac_gmac0 + port];
	} else {
		if (stid == MSGRNG_STNID_XGS0FR)
			sc = dev_mac[dev_mac_xgs0];
		else if (stid == MSGRNG_STNID_XGS1FR)
			sc = dev_mac[dev_mac_xgs0 + 1];
		else
			sc = dev_mac[dev_mac_gmac0 + port];
	}
	if (sc == NULL)
		return;
	priv = &(sc->priv);

	dbg_msg("msg0 = %jx, stid = %d, port = %d, addr=%lx, length=%d, ctrl=%d\n",
	    (uintmax_t)msg->msg0, stid, port, addr, length, ctrl);

	if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
		/* Tx complete: release the descriptor and wake the queue. */
		xlr_rge_tx_ok_done[vcpu]++;
		release_tx_desc(msg, 1);
		ifp = sc->rge_ifp;
		if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		/* Only one CPU at a time runs the backup tx kick. */
		if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
			rge_tx_bkp_func(NULL, 0);
		xlr_set_counter(NETIF_TX_COMPLETE_CYCLES,
		    (read_c0_count() - msgrng_msg_cycles));
	} else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
		/* Rx Packet */
		/* struct mbuf *m = 0; */
		/* int logical_cpu = 0; */

		dbg_msg("Received packet, port = %d\n", port);
		/*
		 * if num frins to be sent exceeds threshold, wake up the
		 * helper thread
		 */
		atomic_add_int(&(priv->frin_to_be_sent[cpu]), 1);
		if ((priv->frin_to_be_sent[cpu]) > MAC_FRIN_TO_BE_SENT_THRESHOLD) {
			mac_frin_replenish(NULL);
		}
		dbg_msg("gmac_%d: rx packet: phys_addr = %jx, length = %x\n",
		    priv->instance, (uintmax_t)phys_addr, length);
		mac_stats_add(priv->stats.rx_packets, 1);
		mac_stats_add(priv->stats.rx_bytes, length);
		xlr_inc_counter(NETIF_RX);
		xlr_set_counter(NETIF_RX_CYCLES,
		    (read_c0_count() - msgrng_msg_cycles));
		rge_rx(sc, phys_addr, length);
		xlr_rge_rx_done[vcpu]++;
	} else {
		printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
	}

}
1669
1670/**********************************************************************
1671 **********************************************************************/
1672static int
1673rge_probe(dev)
1674 device_t dev;
1675{
1676 device_set_desc(dev, "RMI Gigabit Ethernet");
1677
1678 /* Always return 0 */
1679 return 0;
1680}
1681
1682volatile unsigned long xlr_debug_enabled;
1683struct callout rge_dbg_count;
1684static void
1685xlr_debug_count(void *addr)
1686{
1687 struct driver_data *priv = &dev_mac[0]->priv;
1688
1689 /* uint32_t crdt; */
1690 if (xlr_debug_enabled) {
1691 printf("\nAvailRxIn %#x\n", xlr_read_reg(priv->mmio, 0x23e));
1692 }
1693 callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1694}
1695
1696
1697static void
1698xlr_tx_q_wakeup(void *addr)
1699{
1700 int i = 0;
1701 int j = 0;
1702
1703 for (i = 0; i < xlr_board_info.gmacports; i++) {
1704 if (!dev_mac[i] || !dev_mac[i]->active)
1705 continue;
1706 if ((dev_mac[i]->rge_ifp->if_drv_flags) & IFF_DRV_OACTIVE) {
1707 for (j = 0; j < XLR_MAX_CORE; j++) {
1708 if (xlr_tot_avail_p2d[j]) {
1709 dev_mac[i]->rge_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1710 break;
1711 }
1712 }
1713 }
1714 }
1715 if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
1716 rge_tx_bkp_func(NULL, 0);
1717 callout_reset(&xlr_tx_stop_bkp, 5 * hz, xlr_tx_q_wakeup, NULL);
1718}
1719
1720static int
1721rge_attach(device_t dev)
1722{
1723 struct ifnet *ifp;
1724 struct rge_softc *sc;
1725 struct driver_data *priv = 0;
1726 int ret = 0;
1727 struct xlr_gmac_block_t *gmac_conf = device_get_ivars(dev);
1728
1729 sc = device_get_softc(dev);
1730 sc->rge_dev = dev;
1731
1732 /* Initialize mac's */
1733 sc->unit = device_get_unit(dev);
1734
1735 if (sc->unit > XLR_MAX_MACS) {
1736 ret = ENXIO;
1737 goto out;
1738 }
1739 RGE_LOCK_INIT(sc, device_get_nameunit(dev));
1740
1741 priv = &(sc->priv);
1742 priv->sc = sc;
1743
1744 sc->flags = 0; /* TODO : fix me up later */
1745
1746 priv->id = sc->unit;
1747 if (gmac_conf->type == XLR_GMAC) {
1748 priv->instance = priv->id;
1749 priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr +
1750 0x1000 * (sc->unit % 4));
1751 if ((ret = rmi_xlr_gmac_reset(priv)) == -1)
1752 goto out;
1753 } else if (gmac_conf->type == XLR_XGMAC) {
1754 priv->instance = priv->id - xlr_board_info.gmacports;
1755 priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1756 }
1757 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_VI ||
1758 (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI &&
1759 priv->instance >=4)) {
1760 dbg_msg("Arizona board - offset 4 \n");
1761 priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_4_OFFSET);
1762 } else
1763 priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1764
1765 priv->pcs_mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1766 priv->serdes_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1767
1768 sc->base_addr = (unsigned long)priv->mmio;
1769 sc->mem_end = (unsigned long)priv->mmio + XLR_IO_SIZE - 1;
1770
1771 sc->xmit = rge_start;
1772 sc->stop = rge_stop;
1773 sc->get_stats = rmi_xlr_mac_get_stats;
1774 sc->ioctl = rge_ioctl;
1775
1776 /* Initialize the device specific driver data */
1777 mtx_init(&priv->lock, "rge", NULL, MTX_SPIN);
1778
1779 priv->type = gmac_conf->type;
1780
1781 priv->mode = gmac_conf->mode;
1782 if (xlr_board_info.is_xls == 0) {
1783 /* TODO - check II and IIB boards */
1784 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_II &&
1785 xlr_boot1_info.board_minor_version != 1)
1786 priv->phy_addr = priv->instance - 2;
1787 else
1788 priv->phy_addr = priv->instance;
1789 priv->mode = XLR_RGMII;
1790 } else {
1791 if (gmac_conf->mode == XLR_PORT0_RGMII &&
1792 priv->instance == 0) {
1793 priv->mode = XLR_PORT0_RGMII;
1794 priv->phy_addr = 0;
1795 } else {
1796 priv->mode = XLR_SGMII;
1797 /* Board 11 has SGMII daughter cards with the XLS chips, in this case
1798 the phy number is 0-3 for both GMAC blocks */
1799 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI)
1800 priv->phy_addr = priv->instance % 4 + 16;
1801 else
1802 priv->phy_addr = priv->instance + 16;
1803 }
1804 }
1805
1806 priv->txbucket = gmac_conf->station_txbase + priv->instance % 4;
1807 priv->rfrbucket = gmac_conf->station_rfr;
1808 priv->spill_configured = 0;
1809
1810 dbg_msg("priv->mmio=%p\n", priv->mmio);
1811
1812 /* Set up ifnet structure */
1813 ifp = sc->rge_ifp = if_alloc(IFT_ETHER);
1814 if (ifp == NULL) {
1815 device_printf(sc->rge_dev, "failed to if_alloc()\n");
1816 rge_release_resources(sc);
1817 ret = ENXIO;
1818 RGE_LOCK_DESTROY(sc);
1819 goto out;
1820 }
1821 ifp->if_softc = sc;
1822 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1823 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1824 ifp->if_ioctl = rge_ioctl;
1825 ifp->if_start = rge_start;
1826 ifp->if_init = rge_init;
1827 ifp->if_mtu = ETHERMTU;
1828 ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1829 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1830 IFQ_SET_READY(&ifp->if_snd);
1831 sc->active = 1;
1832 ifp->if_hwassist = 0;
1833 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;
1834 ifp->if_capenable = ifp->if_capabilities;
1835
1836 /* Initialize the rge_softc */
1837 sc->irq = gmac_conf->baseirq + priv->instance % 4;
1838
1839 /* Set the IRQ into the rid field */
1840 /*
1841 * note this is a hack to pass the irq to the iodi interrupt setup
1842 * routines
1843 */
1844 sc->rge_irq.__r_i = (struct resource_i *)(intptr_t)sc->irq;
1845
1846 ret = bus_setup_intr(dev, &sc->rge_irq, INTR_FAST | INTR_TYPE_NET | INTR_MPSAFE,
1847 NULL, rge_intr, sc, &sc->rge_intrhand);
1848
1849 if (ret) {
1850 rge_detach(dev);
1851 device_printf(sc->rge_dev, "couldn't set up irq\n");
1852 RGE_LOCK_DESTROY(sc);
1853 goto out;
1854 }
1855 xlr_mac_get_hwaddr(sc);
1856 xlr_mac_setup_hwaddr(priv);
1857
1858 dbg_msg("MMIO %08lx, MII %08lx, PCS %08lx, base %08lx PHY %d IRQ %d\n",
1859 (u_long)priv->mmio, (u_long)priv->mii_mmio, (u_long)priv->pcs_mmio,
1860 (u_long)sc->base_addr, priv->phy_addr, sc->irq);
1861 dbg_msg("HWADDR %02x:%02x tx %d rfr %d\n", (u_int)sc->dev_addr[4],
1862 (u_int)sc->dev_addr[5], priv->txbucket, priv->rfrbucket);
1863
1864 /*
1865 * Set up ifmedia support.
1866 */
1867 /*
1868 * Initialize MII/media info.
1869 */
1870 sc->rge_mii.mii_ifp = ifp;
1871 sc->rge_mii.mii_readreg = rge_mii_read;
1872 sc->rge_mii.mii_writereg = (mii_writereg_t) rge_mii_write;
1873 sc->rge_mii.mii_statchg = rmi_xlr_mac_mii_statchg;
1874 ifmedia_init(&sc->rge_mii.mii_media, 0, rmi_xlr_mac_mediachange,
1875 rmi_xlr_mac_mediastatus);
1876 ifmedia_add(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1877 ifmedia_set(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1878 sc->rge_mii.mii_media.ifm_media = sc->rge_mii.mii_media.ifm_cur->ifm_media;
1879
1880 /*
1881 * Call MI attach routine.
1882 */
1883 ether_ifattach(ifp, sc->dev_addr);
1884
1885 if (priv->type == XLR_GMAC) {
1886 rmi_xlr_gmac_init(priv);
1887 } else if (priv->type == XLR_XGMAC) {
1888 rmi_xlr_xgmac_init(priv);
1889 }
1890 dbg_msg("rge_%d: Phoenix Mac at 0x%p (mtu=%d)\n",
1891 sc->unit, priv->mmio, sc->mtu);
1892 dev_mac[sc->unit] = sc;
1893 if (priv->type == XLR_XGMAC && priv->instance == 0)
1894 dev_mac_xgs0 = sc->unit;
1895 if (priv->type == XLR_GMAC && priv->instance == 0)
1896 dev_mac_gmac0 = sc->unit;
1897
1898 if (!gmac_common_init_done) {
1899 mac_common_init();
1900 gmac_common_init_done = 1;
1901 callout_init(&xlr_tx_stop_bkp, CALLOUT_MPSAFE);
1902 callout_reset(&xlr_tx_stop_bkp, hz, xlr_tx_q_wakeup, NULL);
1903 callout_init(&rge_dbg_count, CALLOUT_MPSAFE);
1904 //callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1905 }
1906 if ((ret = rmi_xlr_mac_open(sc)) == -1) {
1907 RGE_LOCK_DESTROY(sc);
1908 goto out;
1909 }
1910out:
1911 if (ret < 0) {
1912 device_printf(dev, "error - skipping\n");
1913 }
1914 return ret;
1915}
1916
/*
 * Reset hook used by shutdown/detach.  Intentionally empty: the
 * hardware reset sequence lives in rmi_xlr_gmac_reset().
 */
static void
rge_reset(struct rge_softc *sc)
{
}
1921
1922static int
1923rge_detach(dev)
1924 device_t dev;
1925{
1926#ifdef FREEBSD_MAC_NOT_YET
1927 struct rge_softc *sc;
1928 struct ifnet *ifp;
1929
1930 sc = device_get_softc(dev);
1931 ifp = sc->rge_ifp;
1932
1933 RGE_LOCK(sc);
1934 rge_stop(sc);
1935 rge_reset(sc);
1936 RGE_UNLOCK(sc);
1937
1938 ether_ifdetach(ifp);
1939
1940 if (sc->rge_tbi) {
1941 ifmedia_removeall(&sc->rge_ifmedia);
1942 } else {
1943 bus_generic_detach(dev);
1944 device_delete_child(dev, sc->rge_miibus);
1945 }
1946
1947 rge_release_resources(sc);
1948
1949#endif /* FREEBSD_MAC_NOT_YET */
1950 return (0);
1951}
1952static int
1953rge_suspend(device_t dev)
1954{
1955 struct rge_softc *sc;
1956
1957 sc = device_get_softc(dev);
1958 RGE_LOCK(sc);
1959 rge_stop(sc);
1960 RGE_UNLOCK(sc);
1961
1962 return 0;
1963}
1964
/*
 * Resume hook: not implemented; panics if ever invoked.
 */
static int
rge_resume(device_t dev)
{
	panic("rge_resume(): unimplemented\n");
	return 0;
}
1971
/*
 * Free the ifnet and destroy the softc mutex (if it was initialized).
 * Safe to call on a partially attached softc.
 */
static void
rge_release_resources(struct rge_softc *sc)
{

	if (sc->rge_ifp != NULL)
		if_free(sc->rge_ifp);

	if (mtx_initialized(&sc->rge_mtx))	/* XXX */
		RGE_LOCK_DESTROY(sc);
}
1982uint32_t gmac_rx_fail[32];
1983uint32_t gmac_rx_pass[32];
1984
1985static void
1986rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
1987{
1988 struct mbuf *m;
1989 struct ifnet *ifp = sc->rge_ifp;
1990 uint64_t mag;
1991 uint32_t sr;
1992 /*
1993 * On 32 bit machines we use XKPHYS to get the values stores with
1994 * the mbuf, need to explicitly enable KX. Disable interrupts while
1995 * KX is enabled to prevent this setting leaking to other code.
1996 */
1997 sr = xlr_enable_kx();
1998 m = (struct mbuf *)(intptr_t)xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
1999 mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
2000 xlr_restore_kx(sr);
2001 if (mag != 0xf00bad) {
2002 /* somebody else packet Error - FIXME in intialization */
2003 printf("cpu %d: *ERROR* Not my packet paddr %p\n",
2004 xlr_cpu_id(), (void *)paddr);
2005 return;
2006 }
2007 /* align the data */
2008 m->m_data += BYTE_OFFSET;
2009 m->m_pkthdr.len = m->m_len = len;
2010 m->m_pkthdr.rcvif = ifp;
2011
2012#ifdef DUMP_PACKETS
2013 {
2014 int i = 0;
2015 unsigned char *buf = (char *)m->m_data;
2016
2017 printf("Rx Packet: length=%d\n", len);
2018 for (i = 0; i < 64; i++) {
2019 if (i && (i % 16) == 0)
2020 printf("\n");
2021 printf("%02x ", buf[i]);
2022 }
2023 printf("\n");
2024 }
2025#endif
2026 ifp->if_ipackets++;
2027 (*ifp->if_input) (ifp, m);
2028}
2029
/*
 * Interrupt handler.  An MDIO interrupt means a PHY status change:
 * poll every GMAC's PHY and reconfigure its link speed.  Anything
 * else is logged as an error.  All interrupt bits are then acked.
 * On pre-rev-2 silicon, XGMAC interrupts are only routed to the
 * xgs_1 IRQ, so the xgs_0 INTREG is checked and acked here as well.
 */
static void
rge_intr(void *arg)
{
	struct rge_softc *sc = (struct rge_softc *)arg;
	struct driver_data *priv = &(sc->priv);
	xlr_reg_t *mmio = priv->mmio;
	uint32_t intreg = xlr_read_reg(mmio, R_INTREG);

	if (intreg & (1 << O_INTREG__MDInt)) {
		/* PHY status change: sweep all GMAC PHYs. */
		uint32_t phy_int_status = 0;
		int i = 0;

		for (i = 0; i < XLR_MAX_MACS; i++) {
			struct rge_softc *phy_dev = 0;
			struct driver_data *phy_priv = 0;

			phy_dev = dev_mac[i];
			if (phy_dev == NULL)
				continue;

			phy_priv = &phy_dev->priv;

			/* XGMAC ports have no MII PHY to poll. */
			if (phy_priv->type == XLR_XGMAC)
				continue;

			/* Reading PHY register 26 clears its interrupt. */
			phy_int_status = rge_mii_read_internal(phy_priv->mii_mmio,
			    phy_priv->phy_addr, 26);
			printf("rge%d: Phy addr %d, MII MMIO %lx status %x\n", phy_priv->instance,
			    (int)phy_priv->phy_addr, (u_long)phy_priv->mii_mmio, phy_int_status);
			rmi_xlr_gmac_config_speed(phy_priv);
		}
	} else {
		printf("[%s]: mac type = %d, instance %d error "
		    "interrupt: INTREG = 0x%08x\n",
		    __FUNCTION__, priv->type, priv->instance, intreg);
	}

	/* clear all interrupts and hope to make progress */
	xlr_write_reg(mmio, R_INTREG, 0xffffffff);

	/* (not yet) on A0 and B0, xgmac interrupts are routed only to xgs_1 irq */
	if ((xlr_revision() < 2) && (priv->type == XLR_XGMAC)) {
		struct rge_softc *xgs0_dev = dev_mac[dev_mac_xgs0];
		struct driver_data *xgs0_priv = &xgs0_dev->priv;
		xlr_reg_t *xgs0_mmio = xgs0_priv->mmio;
		uint32_t xgs0_intreg = xlr_read_reg(xgs0_mmio, R_INTREG);

		if (xgs0_intreg) {
			printf("[%s]: mac type = %d, instance %d error "
			    "interrupt: INTREG = 0x%08x\n",
			    __FUNCTION__, xgs0_priv->type, xgs0_priv->instance, xgs0_intreg);

			xlr_write_reg(xgs0_mmio, R_INTREG, 0xffffffff);
		}
	}
}
2086
/*
 * Drain the interface send queue, bounded by the number of free tx
 * descriptors available to this core.  A packet that cannot be sent
 * is prepended back onto the queue and the queue is marked OACTIVE.
 *
 * NOTE(review): the 'threshold' parameter is never used in the body —
 * callers pass RGE_TX_THRESHOLD / RGE_TX_Q_SIZE; confirm intent.
 */
static void
rge_start_locked(struct ifnet *ifp, int threshold)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	int prepend_pkt = 0;
	int i = 0;
	struct p2d_tx_desc *tx_desc = NULL;
	int cpu = xlr_core_id();
	uint32_t vcpu = xlr_cpu_id();

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	/* At most one packet per free tx descriptor on this core. */
	for (i = 0; i < xlr_tot_avail_p2d[cpu]; i++) {
		if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			return;
		tx_desc = get_p2d_desc();
		if (!tx_desc) {
			xlr_rge_get_p2d_failed[vcpu]++;
			return;
		}
		/* Grab a packet off the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			free_p2d_desc(tx_desc);
			return;
		}
		prepend_pkt = rmi_xlr_mac_xmit(m, sc, 0, tx_desc);

		if (prepend_pkt) {
			/* Send failed: requeue and stop the queue until tx-done. */
			xlr_rge_tx_prepend[vcpu]++;
			IF_PREPEND(&ifp->if_snd, m);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			return;
		} else {
			ifp->if_opackets++;
			xlr_rge_tx_done[vcpu]++;
		}
	}
}
2128
/*
 * ifnet if_start hook: drain the send queue with the full queue size
 * as the threshold.
 */
static void
rge_start(struct ifnet *ifp)
{
	rge_start_locked(ifp, RGE_TX_Q_SIZE);
}
2134
2135static int
2136rge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2137{
2138 struct rge_softc *sc = ifp->if_softc;
2139 struct ifreq *ifr = (struct ifreq *)data;
2140 int mask, error = 0;
2141
2142 /* struct mii_data *mii; */
2143 switch (command) {
2144 case SIOCSIFMTU:
2145 ifp->if_mtu = ifr->ifr_mtu;
2146 error = rmi_xlr_mac_change_mtu(sc, ifr->ifr_mtu);
2147 break;
2148 case SIOCSIFFLAGS:
2149
2150 RGE_LOCK(sc);
2151 if (ifp->if_flags & IFF_UP) {
2152 /*
2153 * If only the state of the PROMISC flag changed,
2154 * then just use the 'set promisc mode' command
2155 * instead of reinitializing the entire NIC. Doing a
2156 * full re-init means reloading the firmware and
2157 * waiting for it to start up, which may take a
2158 * second or two. Similarly for ALLMULTI.
2159 */
2160 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2161 ifp->if_flags & IFF_PROMISC &&
2162 !(sc->flags & IFF_PROMISC)) {
2163 sc->flags |= IFF_PROMISC;
2164 xlr_mac_set_rx_mode(sc);
2165 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2166 !(ifp->if_flags & IFF_PROMISC) &&
2167 sc->flags & IFF_PROMISC) {
2168 sc->flags &= IFF_PROMISC;
2169 xlr_mac_set_rx_mode(sc);
2170 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2171 (ifp->if_flags ^ sc->flags) & IFF_ALLMULTI) {
2172 rmi_xlr_mac_set_multicast_list(sc);
2173 } else
2174 xlr_mac_set_rx_mode(sc);
2175 } else {
2176 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2177 xlr_mac_set_rx_mode(sc);
2178 }
2179 }
2180 sc->flags = ifp->if_flags;
2181 RGE_UNLOCK(sc);
2182 error = 0;
2183 break;
2184 case SIOCADDMULTI:
2185 case SIOCDELMULTI:
2186 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2187 RGE_LOCK(sc);
2188 rmi_xlr_mac_set_multicast_list(sc);
2189 RGE_UNLOCK(sc);
2190 error = 0;
2191 }
2192 break;
2193 case SIOCSIFMEDIA:
2194 case SIOCGIFMEDIA:
2195 error = ifmedia_ioctl(ifp, ifr,
2196 &sc->rge_mii.mii_media, command);
2197 break;
2198 case SIOCSIFCAP:
2199 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2200 ifp->if_hwassist = 0;
2201 break;
2202 default:
2203 error = ether_ioctl(ifp, command, data);
2204 break;
2205 }
2206
2207 return (error);
2208}
2209
2210static void
2211rge_init(void *addr)
2212{
2213 struct rge_softc *sc = (struct rge_softc *)addr;
2214 struct ifnet *ifp;
2215 struct driver_data *priv = &(sc->priv);
2216
2217 ifp = sc->rge_ifp;
2218
2219 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2220 return;
2221 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2222 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2223
2224 rmi_xlr_mac_set_enable(priv, 1);
2225}
2226
/* Stop the interface by closing (disabling) the underlying MAC. */
static void
rge_stop(struct rge_softc *sc)
{
	rmi_xlr_mac_close(sc);
}
2232
2233static int
2234rge_shutdown(device_t dev)
2235{
2236 struct rge_softc *sc;
2237
2238 sc = device_get_softc(dev);
2239
2240 RGE_LOCK(sc);
2241 rge_stop(sc);
2242 rge_reset(sc);
2243 RGE_UNLOCK(sc);
2244
2245 return (0);
2246}
2247
/*
 * Bring the MAC up: populate the Rx free-in ring, program the Rx
 * filter, set up MDIO interrupt masking once the last GMAC port is
 * opened, and apply the stored speed/duplex/flow-control settings.
 * Returns 0 on success, -1 if the free-in ring could not be filled.
 */
static int
rmi_xlr_mac_open(struct rge_softc *sc)
{
	struct driver_data *priv = &(sc->priv);
	int i;

	dbg_msg("IN\n");

	if (rmi_xlr_mac_fill_rxfr(sc)) {
		return -1;
	}
	mtx_lock_spin(&priv->lock);

	xlr_mac_set_rx_mode(sc);

	/*
	 * Once the last GMAC port opens, walk every port and unmask the
	 * MD interrupt only on instance 0; all other instances get it
	 * masked (the expression evaluates to 0 for them).
	 */
	if (sc->unit == xlr_board_info.gmacports - 1) {
		printf("Enabling MDIO interrupts\n");
		struct rge_softc *tmp = NULL;

		for (i = 0; i < xlr_board_info.gmacports; i++) {
			tmp = dev_mac[i];
			if (tmp)
				xlr_write_reg(tmp->priv.mmio, R_INTMASK,
				    ((tmp->priv.instance == 0) << O_INTMASK__MDInt));
		}
	}
	/*
	 * Configure the speed, duplex, and flow control
	 */
	rmi_xlr_mac_set_speed(priv, priv->speed);
	rmi_xlr_mac_set_duplex(priv, priv->duplex, priv->flow_ctrl);
	rmi_xlr_mac_set_enable(priv, 0);

	mtx_unlock_spin(&priv->lock);

	/* Reset the per-core replenish counters. */
	for (i = 0; i < 8; i++) {
		atomic_set_int(&(priv->frin_to_be_sent[i]), 0);
	}

	return 0;
}
2289
2290/**********************************************************************
2291 **********************************************************************/
/*
 * Disable the MAC and record the queue-stop event counters.
 * Always returns 0.
 */
static int
rmi_xlr_mac_close(struct rge_softc *sc)
{
	struct driver_data *priv = &(sc->priv);

	mtx_lock_spin(&priv->lock);

	/*
	 * There may be leftover mbufs in the ring as well as in the
	 * free-in area; they will be reused next time open is called.
	 */

	rmi_xlr_mac_set_enable(priv, 0);

	xlr_inc_counter(NETIF_STOP_Q);
	port_inc_counter(priv->instance, PORT_STOPQ);

	mtx_unlock_spin(&priv->lock);

	return 0;
}
2313
2314/**********************************************************************
2315 **********************************************************************/
2316static struct rge_softc_stats *
2317rmi_xlr_mac_get_stats(struct rge_softc *sc)
2318{
2319 struct driver_data *priv = &(sc->priv);
2320
2321 /* unsigned long flags; */
2322
2323 mtx_lock_spin(&priv->lock);
2324
2325 /* XXX update other stats here */
2326
2327 mtx_unlock_spin(&priv->lock);
2328
2329 return &priv->stats;
2330}
2331
2332/**********************************************************************
2333 **********************************************************************/
/*
 * Multicast filter update hook — intentionally empty.  The filter
 * config written in xlr_mac_setup_hwaddr() sets ALL_MCAST_EN, so no
 * per-address programming appears to be needed here (NOTE(review):
 * confirm this is the intended behavior rather than a TODO).
 */
static void
rmi_xlr_mac_set_multicast_list(struct rge_softc *sc)
{
}
2338
2339/**********************************************************************
2340 **********************************************************************/
2341static int
2342rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu)
2343{
2344 struct driver_data *priv = &(sc->priv);
2345
2346 if ((new_mtu > 9500) || (new_mtu < 64)) {
2347 return -EINVAL;
2348 }
2349 mtx_lock_spin(&priv->lock);
2350
2351 sc->mtu = new_mtu;
2352
2353 /* Disable MAC TX/RX */
2354 rmi_xlr_mac_set_enable(priv, 0);
2355
2356 /* Flush RX FR IN */
2357 /* Flush TX IN */
2358 rmi_xlr_mac_set_enable(priv, 1);
2359
2360 mtx_unlock_spin(&priv->lock);
2361 return 0;
2362}
2363
2364/**********************************************************************
2365 **********************************************************************/
2366static int
2367rmi_xlr_mac_fill_rxfr(struct rge_softc *sc)
2368{
2369 struct driver_data *priv = &(sc->priv);
2370 int i;
2371 int ret = 0;
2372 void *ptr;
2373
2374 dbg_msg("\n");
2375 if (!priv->init_frin_desc)
2376 return ret;
2377 priv->init_frin_desc = 0;
2378
2379 dbg_msg("\n");
2380 for (i = 0; i < MAX_NUM_DESC; i++) {
2381 ptr = get_buf();
2382 if (!ptr) {
2383 ret = -ENOMEM;
2384 break;
2385 }
2386 /* Send the free Rx desc to the MAC */
2387 xlr_mac_send_fr(priv, vtophys(ptr), MAX_FRAME_SIZE);
2388 }
2389
2390 return ret;
2391}
2392
2393/**********************************************************************
2394 **********************************************************************/
/*
 * Allocate one cacheline-aligned spill buffer and program its physical
 * address and size into the given message-ring spill registers.  The
 * address is split across two registers: the low register takes the
 * address at 32-byte granularity (hence >> 5), the high register the
 * top 3 bits (>> 37).  Returns the buffer's KVA; panics on allocation
 * failure or misalignment.
 */
static __inline__ void *
rmi_xlr_config_spill(xlr_reg_t * mmio,
    int reg_start_0, int reg_start_1,
    int reg_size, int size)
{
	uint32_t spill_size = size;
	void *spill = NULL;
	uint64_t phys_addr = 0;


	spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
	if (!spill || ((vm_offset_t)spill & (XLR_CACHELINE_SIZE - 1))) {
		panic("Unable to allocate memory for spill area!\n");
	}
	phys_addr = vtophys(spill);
	dbg_msg("Allocate spill %d bytes at %jx\n", size, (uintmax_t)phys_addr);
	xlr_write_reg(mmio, reg_start_0, (phys_addr >> 5) & 0xffffffff);
	xlr_write_reg(mmio, reg_start_1, (phys_addr >> 37) & 0x07);
	xlr_write_reg(mmio, reg_size, spill_size);

	return spill;
}
2418
2419static void
2420rmi_xlr_config_spill_area(struct driver_data *priv)
2421{
2422 /*
2423 * if driver initialization is done parallely on multiple cpus
2424 * spill_configured needs synchronization
2425 */
2426 if (priv->spill_configured)
2427 return;
2428
2429 if (priv->type == XLR_GMAC && priv->instance % 4 != 0) {
2430 priv->spill_configured = 1;
2431 return;
2432 }
2433 priv->spill_configured = 1;
2434
2435 priv->frin_spill =
2436 rmi_xlr_config_spill(priv->mmio,
2437 R_REG_FRIN_SPILL_MEM_START_0,
2438 R_REG_FRIN_SPILL_MEM_START_1,
2439 R_REG_FRIN_SPILL_MEM_SIZE,
2440 MAX_FRIN_SPILL *
2441 sizeof(struct fr_desc));
2442
2443 priv->class_0_spill =
2444 rmi_xlr_config_spill(priv->mmio,
2445 R_CLASS0_SPILL_MEM_START_0,
2446 R_CLASS0_SPILL_MEM_START_1,
2447 R_CLASS0_SPILL_MEM_SIZE,
2448 MAX_CLASS_0_SPILL *
2449 sizeof(union rx_tx_desc));
2450 priv->class_1_spill =
2451 rmi_xlr_config_spill(priv->mmio,
2452 R_CLASS1_SPILL_MEM_START_0,
2453 R_CLASS1_SPILL_MEM_START_1,
2454 R_CLASS1_SPILL_MEM_SIZE,
2455 MAX_CLASS_1_SPILL *
2456 sizeof(union rx_tx_desc));
2457
2458 priv->frout_spill =
2459 rmi_xlr_config_spill(priv->mmio, R_FROUT_SPILL_MEM_START_0,
2460 R_FROUT_SPILL_MEM_START_1,
2461 R_FROUT_SPILL_MEM_SIZE,
2462 MAX_FROUT_SPILL *
2463 sizeof(struct fr_desc));
2464
2465 priv->class_2_spill =
2466 rmi_xlr_config_spill(priv->mmio,
2467 R_CLASS2_SPILL_MEM_START_0,
2468 R_CLASS2_SPILL_MEM_START_1,
2469 R_CLASS2_SPILL_MEM_SIZE,
2470 MAX_CLASS_2_SPILL *
2471 sizeof(union rx_tx_desc));
2472 priv->class_3_spill =
2473 rmi_xlr_config_spill(priv->mmio,
2474 R_CLASS3_SPILL_MEM_START_0,
2475 R_CLASS3_SPILL_MEM_START_1,
2476 R_CLASS3_SPILL_MEM_SIZE,
2477 MAX_CLASS_3_SPILL *
2478 sizeof(union rx_tx_desc));
2479 priv->spill_configured = 1;
2480}
2481
2482/*****************************************************************
2483 * Write the MAC address to the XLR registers
2484 * All 4 addresses are the same for now
2485 *****************************************************************/
static void
xlr_mac_setup_hwaddr(struct driver_data *priv)
{
	struct rge_softc *sc = priv->sc;

	/* Address bytes 2..5 packed into the low MAC_ADDR0 word. */
	xlr_write_reg(priv->mmio, R_MAC_ADDR0,
	    ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16)
	    | (sc->dev_addr[3] << 8) | (sc->dev_addr[2]))
	    );

	/* Address bytes 0..1 in the high MAC_ADDR0 word. */
	xlr_write_reg(priv->mmio, R_MAC_ADDR0 + 1,
	    ((sc->dev_addr[1] << 24) | (sc->
	    dev_addr[0] << 16)));

	/* All-ones masks: every bit of the address must match. */
	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2 + 1, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3 + 1, 0xffffffff);

	/* Accept broadcast, all multicast, and frames matching ADDR0. */
	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG,
	    (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
	    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
	    (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)
	    );
}
2514
2515/*****************************************************************
2516 * Read the MAC address from the XLR registers
2517 * All 4 addresses are the same for now
2518 *****************************************************************/
2519static void
2520xlr_mac_get_hwaddr(struct rge_softc *sc)
2521{
2522 struct driver_data *priv = &(sc->priv);
2523
2524 sc->dev_addr[0] = (xlr_boot1_info.mac_addr >> 40) & 0xff;
2525 sc->dev_addr[1] = (xlr_boot1_info.mac_addr >> 32) & 0xff;
2526 sc->dev_addr[2] = (xlr_boot1_info.mac_addr >> 24) & 0xff;
2527 sc->dev_addr[3] = (xlr_boot1_info.mac_addr >> 16) & 0xff;
2528 sc->dev_addr[4] = (xlr_boot1_info.mac_addr >> 8) & 0xff;
2529 sc->dev_addr[5] = ((xlr_boot1_info.mac_addr >> 0) & 0xff) + priv->instance;
2530}
2531
2532/*****************************************************************
2533 * Mac Module Initialization
2534 *****************************************************************/
2535static void
2536mac_common_init(void)
2537{
2538 init_p2d_allocation();
2539 init_tx_ring();
2540
2541 if (xlr_board_info.is_xls) {
975 }
976 }
977 printf("rmi_xlr_config_pde: bucket_map=%jx\n", (uintmax_t)bucket_map);
978
979 /* bucket_map = 0x1; */
980 xlr_write_reg(priv->mmio, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
981 xlr_write_reg(priv->mmio, R_PDE_CLASS_0 + 1,
982 ((bucket_map >> 32) & 0xffffffff));
983
984 xlr_write_reg(priv->mmio, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
985 xlr_write_reg(priv->mmio, R_PDE_CLASS_1 + 1,
986 ((bucket_map >> 32) & 0xffffffff));
987
988 xlr_write_reg(priv->mmio, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
989 xlr_write_reg(priv->mmio, R_PDE_CLASS_2 + 1,
990 ((bucket_map >> 32) & 0xffffffff));
991
992 xlr_write_reg(priv->mmio, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
993 xlr_write_reg(priv->mmio, R_PDE_CLASS_3 + 1,
994 ((bucket_map >> 32) & 0xffffffff));
995}
996
997static void
998rge_smp_update_pde(void *dummy __unused)
999{
1000 int i;
1001 struct driver_data *priv;
1002 struct rge_softc *sc;
1003
1004 printf("Updating packet distribution for SMP\n");
1005 for (i = 0; i < XLR_MAX_MACS; i++) {
1006 sc = dev_mac[i];
1007 if (!sc)
1008 continue;
1009 priv = &(sc->priv);
1010 rmi_xlr_mac_set_enable(priv, 0);
1011 rmi_xlr_config_pde(priv);
1012 rmi_xlr_mac_set_enable(priv, 1);
1013 }
1014}
1015
1016SYSINIT(rge_smp_update_pde, SI_SUB_SMP, SI_ORDER_ANY, rge_smp_update_pde, NULL);
1017
1018
/*
 * Configure the packet parser for this MAC: no classification, plus
 * an L3 table entry extracting the IPv4 src/dst addresses and
 * protocol field.
 */
static void
rmi_xlr_config_parser(struct driver_data *priv)
{
	/*
	 * Mark it as no classification.  The parser extract is guaranteed
	 * to be zero with no classification.
	 *
	 * NOTE(review): R_L2TYPE_0 is written twice (0x00 then 0x01); the
	 * second write wins — confirm the first write is intentional.
	 */
	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x00);

	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x01);

	/* configure the parser : L2 Type is configured in the bootloader */
	/* extract IP: src, dest protocol */
	xlr_write_reg(priv->mmio, R_L3CTABLE,
	    (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
	    (0x0800 << 0));
	xlr_write_reg(priv->mmio, R_L3CTABLE + 1,
	    (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));

}
1039
/*
 * Classifier setup.  Only the XGMAC needs work: its translate table
 * is cleared (reset values are unusable) and the parser config is
 * zeroed.  GMAC ports need no classifier programming here.
 */
static void
rmi_xlr_config_classifier(struct driver_data *priv)
{
	int i = 0;

	if (priv->type == XLR_XGMAC) {
		/* xgmac translation table doesn't have sane values on reset */
		for (i = 0; i < 64; i++)
			xlr_write_reg(priv->mmio, R_TRANSLATETABLE + i, 0x0);

		/*
		 * use upper 7 bits of the parser extract to index the
		 * translate table
		 */
		xlr_write_reg(priv->mmio, R_PARSERCONFIGREG, 0x0);
	}
}
1057
/* R_INTERFACE_CONTROL register values selecting the SGMII link speed. */
enum {
	SGMII_SPEED_10 = 0x00000000,
	SGMII_SPEED_100 = 0x02000000,
	SGMII_SPEED_1000 = 0x04000000,
};
1063
/*
 * Query the PHY for the negotiated speed and link state, program the
 * MAC interface/config/core registers to match, and mirror the result
 * into the ifmedia state.  An unrecognized speed falls back to 100Mbps.
 */
static void
rmi_xlr_gmac_config_speed(struct driver_data *priv)
{
	int phy_addr = priv->phy_addr;
	xlr_reg_t *mmio = priv->mmio;
	struct rge_softc *sc = priv->sc;

	/* PHY reg 28 bits [4:3] hold the resolved speed; reg 1 bit 2 is link. */
	priv->speed = rge_mii_read_internal(priv->mii_mmio, phy_addr, 28);
	priv->link = rge_mii_read_internal(priv->mii_mmio, phy_addr, 1) & 0x4;
	priv->speed = (priv->speed >> 3) & 0x03;

	if (priv->speed == xlr_mac_speed_10) {
		if (priv->mode != XLR_RGMII)
			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_10);
		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
		xlr_write_reg(mmio, R_CORECONTROL, 0x02);
		printf("%s: [10Mbps]\n", device_get_nameunit(sc->rge_dev));
		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
	} else if (priv->speed == xlr_mac_speed_100) {
		if (priv->mode != XLR_RGMII)
			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
		xlr_write_reg(mmio, R_CORECONTROL, 0x01);
		printf("%s: [100Mbps]\n", device_get_nameunit(sc->rge_dev));
		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
	} else {
		if (priv->speed != xlr_mac_speed_1000) {
			/* Unknown speed: treat as 100Mbps and warn. */
			if (priv->mode != XLR_RGMII)
				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
			printf("PHY reported unknown MAC speed, defaulting to 100Mbps\n");
			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
			xlr_write_reg(mmio, R_CORECONTROL, 0x01);
			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		} else {
			if (priv->mode != XLR_RGMII)
				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_1000);
			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7217);
			xlr_write_reg(mmio, R_CORECONTROL, 0x00);
			printf("%s: [1000Mbps]\n", device_get_nameunit(sc->rge_dev));
			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
		}
	}

	/* No link: report plain Ethernet media and mark the link down. */
	if (!priv->link) {
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER;
		sc->link_up = 0;
	} else {
		sc->link_up = 1;
	}
}
1122
1123/*****************************************************************
1124 * Initialize XGMAC
1125 *****************************************************************/
/*
 * One-time XGMAC (10GbE) port initialization: descriptor packing, PDE,
 * parser and classifier setup, XGMAC/glue register programming, PHY
 * un-reset via the board CPLD, MDIO setup, spill areas, per-instance
 * message-ring bucket sizes and credit counters, and ifmedia state.
 */
static void
rmi_xlr_xgmac_init(struct driver_data *priv)
{
	int i = 0;
	xlr_reg_t *mmio = priv->mmio;
	int id = priv->instance;
	struct rge_softc *sc = priv->sc;
	volatile unsigned short *cpld;

	/* Board CPLD registers; presumably a fixed KSEG1 address — TODO confirm. */
	cpld = (volatile unsigned short *)0xBD840000;

	xlr_write_reg(priv->mmio, R_DESC_PACK_CTRL,
	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize) | (4 << 20));
	xlr_write_reg(priv->mmio, R_BYTEOFFSET0, BYTE_OFFSET);
	rmi_xlr_config_pde(priv);
	rmi_xlr_config_parser(priv);
	rmi_xlr_config_classifier(priv);

	xlr_write_reg(priv->mmio, R_MSG_TX_THRESHOLD, 1);

	/* configure the XGMAC Registers */
	xlr_write_reg(mmio, R_XGMAC_CONFIG_1, 0x50000026);

	/* configure the XGMAC_GLUE Registers */
	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
	xlr_write_reg(mmio, R_STATCTRL, 0x04);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);

	xlr_write_reg(mmio, R_XGMACPADCALIBRATION, 0x030);
	xlr_write_reg(mmio, R_EGRESSFIFOCARVINGSLOTS, 0x0f);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
	xlr_write_reg(mmio, R_XGMAC_MIIM_CONFIG, 0x3e);

	/*
	 * take XGMII phy out of reset
	 */
	/*
	 * we are pulling everything out of reset because writing a 0 would
	 * reset other devices on the chip
	 */
	cpld[ATX_CPLD_RESET_1] = 0xffff;
	cpld[ATX_CPLD_MISC_CTRL] = 0xffff;
	cpld[ATX_CPLD_RESET_2] = 0xffff;

	xgmac_mdio_setup(mmio);

	rmi_xlr_config_spill_area(priv);

	/* Bucket sizes and credit counters are per-XGMAC instance (0 or 1). */
	if (id == 0) {
		for (i = 0; i < 16; i++) {
			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
			    bucket_sizes.
			    bucket[MSGRNG_STNID_XGS0_TX + i]);
		}

		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0JFR]);
		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0RFR]);

		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
			    cc_table_xgs_0.
			    counters[i >> 3][i & 0x07]);
		}
	} else if (id == 1) {
		for (i = 0; i < 16; i++) {
			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
			    bucket_sizes.
			    bucket[MSGRNG_STNID_XGS1_TX + i]);
		}

		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1JFR]);
		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1RFR]);

		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
			    cc_table_xgs_1.
			    counters[i >> 3][i & 0x07]);
		}
	}
	/* Report fixed 10G-SR full-duplex media, valid and active. */
	sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media.ifm_media |= (IFM_AVALID | IFM_ACTIVE);
	sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media.ifm_cur->ifm_media |= (IFM_AVALID | IFM_ACTIVE);

	/* Arm the free-in ring fill on the next rmi_xlr_mac_fill_rxfr(). */
	priv->init_frin_desc = 1;
}
1220
1221/*******************************************************
1222 * Initialization gmac
1223 *******************************************************/
/*
 * Soft-reset the GMAC receive path: disable MAC and core Rx, poll for
 * Rx halt, assert the soft-reset bit, poll for completion, then clear
 * it.  Each poll waits up to ~100ms (100 x 1ms).  Returns 0 on
 * success, -1 if either poll times out.
 */
static int
rmi_xlr_gmac_reset(struct driver_data *priv)
{
	volatile uint32_t val;
	xlr_reg_t *mmio = priv->mmio;
	int i, maxloops = 100;

	/* Disable MAC RX */
	val = xlr_read_reg(mmio, R_MAC_CONFIG_1);
	val &= ~0x4;
	xlr_write_reg(mmio, R_MAC_CONFIG_1, val);

	/* Disable Core RX */
	val = xlr_read_reg(mmio, R_RX_CONTROL);
	val &= ~0x1;
	xlr_write_reg(mmio, R_RX_CONTROL, val);

	/* wait for rx to halt */
	for (i = 0; i < maxloops; i++) {
		val = xlr_read_reg(mmio, R_RX_CONTROL);
		if (val & 0x2)
			break;
		DELAY(1000);
	}
	if (i == maxloops)
		return -1;

	/* Issue a soft reset */
	val = xlr_read_reg(mmio, R_RX_CONTROL);
	val |= 0x4;
	xlr_write_reg(mmio, R_RX_CONTROL, val);

	/* wait for reset to complete */
	for (i = 0; i < maxloops; i++) {
		val = xlr_read_reg(mmio, R_RX_CONTROL);
		if (val & 0x8)
			break;
		DELAY(1000);
	}
	if (i == maxloops)
		return -1;

	/* Clear the soft reset bit */
	val = xlr_read_reg(mmio, R_RX_CONTROL);
	val &= ~0x4;
	xlr_write_reg(mmio, R_RX_CONTROL, val);
	return 0;
}
1272
/*
 * One-time GMAC port initialization: spill areas, descriptor packing,
 * PDE/parser/classifier, MII and (non-RGMII) SerDes setup, speed
 * configuration, glue registers, and per-block message-ring bucket
 * sizes and credit counters.  The MAC is left disabled; rge_init()
 * enables it.
 */
static void
rmi_xlr_gmac_init(struct driver_data *priv)
{
	int i = 0;
	xlr_reg_t *mmio = priv->mmio;
	int id = priv->instance;
	struct stn_cc *gmac_cc_config;
	uint32_t value = 0;
	/* Four GMAC ports share one block; blk selects the credit config. */
	int blk = id / 4, port = id % 4;

	rmi_xlr_mac_set_enable(priv, 0);

	rmi_xlr_config_spill_area(priv);

	xlr_write_reg(mmio, R_DESC_PACK_CTRL,
	    (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset) |
	    (1 << O_DESC_PACK_CTRL__MaxEntry) |
	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));

	rmi_xlr_config_pde(priv);
	rmi_xlr_config_parser(priv);
	rmi_xlr_config_classifier(priv);

	xlr_write_reg(mmio, R_MSG_TX_THRESHOLD, 3);
	xlr_write_reg(mmio, R_MAC_CONFIG_1, 0x35);
	xlr_write_reg(mmio, R_RX_CONTROL, (0x7 << 6));

	if (priv->mode == XLR_PORT0_RGMII) {
		printf("Port 0 set in RGMII mode\n");
		value = xlr_read_reg(mmio, R_RX_CONTROL);
		value |= 1 << O_RX_CONTROL__RGMII;
		xlr_write_reg(mmio, R_RX_CONTROL, value);
	}
	rmi_xlr_mac_mii_init(priv);


#if 0
	priv->advertising = ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half |
	    ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half |
	    ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
	    ADVERTISED_MII;
#endif

	/*
	 * Enable all MDIO interrupts in the phy.  The RX_ER bit seems to
	 * get set about every 1 sec in GigE mode; ignore it for now...
	 */
	rge_mii_write_internal(priv->mii_mmio, priv->phy_addr, 25, 0xfffffffe);

	if (priv->mode != XLR_RGMII) {
		serdes_regs_init(priv);
		serdes_autoconfig(priv);
	}
	rmi_xlr_gmac_config_speed(priv);

	/* Replace the low 7 bits of IPG/IFG with the back-to-back IPG. */
	value = xlr_read_reg(mmio, R_IPG_IFG);
	xlr_write_reg(mmio, R_IPG_IFG, ((value & ~0x7f) | MAC_B2B_IPG));
	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
	xlr_write_reg(mmio, R_STATCTRL, 0x04);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
	xlr_write_reg(mmio, R_INTMASK, 0);
	xlr_write_reg(mmio, R_FREEQCARVE, 0);

	xlr_write_reg(mmio, R_GMAC_TX0_BUCKET_SIZE + port,
	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
	xlr_write_reg(mmio, R_GMAC_JFR0_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
	xlr_write_reg(mmio, R_GMAC_RFR0_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
	xlr_write_reg(mmio, R_GMAC_JFR1_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
	xlr_write_reg(mmio, R_GMAC_RFR1_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);

	dbg_msg("Programming credit counter %d : %d -> %d\n", blk, R_GMAC_TX0_BUCKET_SIZE + port,
	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);

	gmac_cc_config = xlr_board_info.gmac_block[blk].credit_config;
	for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
		xlr_write_reg(mmio, R_CC_CPU0_0 + i,
		    gmac_cc_config->counters[i >> 3][i & 0x07]);
		dbg_msg("%d: %d -> %d\n", priv->instance,
		    R_CC_CPU0_0 + i, gmac_cc_config->counters[i >> 3][i & 0x07]);
	}
	/* Arm the free-in ring fill on the next rmi_xlr_mac_fill_rxfr(). */
	priv->init_frin_desc = 1;
}
1362
1363/**********************************************************************
1364 * Set promiscuous mode
1365 **********************************************************************/
1366static void
1367xlr_mac_set_rx_mode(struct rge_softc *sc)
1368{
1369 struct driver_data *priv = &(sc->priv);
1370 uint32_t regval;
1371
1372 regval = xlr_read_reg(priv->mmio, R_MAC_FILTER_CONFIG);
1373
1374 if (sc->flags & IFF_PROMISC) {
1375 regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1376 (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1377 (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1378 (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
1379 } else {
1380 regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1381 (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
1382 }
1383
1384 xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG, regval);
1385}
1386
1387/**********************************************************************
1388 * Configure LAN speed for the specified MAC.
1389 ********************************************************************* */
/*
 * Placeholder: speed is actually programmed by
 * rmi_xlr_gmac_config_speed() from PHY status.  Always returns 0.
 */
static int
rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed)
{
	return 0;
}
1395
1396/**********************************************************************
1397 * Set Ethernet duplex and flow control options for this MAC
1398 ********************************************************************* */
/*
 * Placeholder: duplex/flow-control programming is not implemented;
 * the hardware defaults are used.  Always returns 0.
 */
static int
rmi_xlr_mac_set_duplex(struct driver_data *s,
    xlr_mac_duplex_t duplex, xlr_mac_fc_t fc)
{
	return 0;
}
1405
1406/*****************************************************************
1407 * Kernel Net Stack <-> MAC Driver Interface
1408 *****************************************************************/
1409/**********************************************************************
1410 **********************************************************************/
1411#define MAC_TX_FAIL 2
1412#define MAC_TX_PASS 0
1413#define MAC_TX_RETRY 1
1414
1415int xlr_dev_queue_xmit_hack = 0;
1416
/*
 * Build a fragment-list message for the mbuf and send it to the MAC's
 * Tx bucket over the message ring.  Returns MAC_TX_PASS on success,
 * MAC_TX_FAIL if the fragment list cannot be built or the message
 * send fails (the tx descriptor is released in the latter case).
 */
static int
mac_xmit(struct mbuf *m, struct rge_softc *sc,
    struct driver_data *priv, int len, struct p2d_tx_desc *tx_desc)
{
	struct msgrng_msg msg = {0,0,0,0};
	int stid = priv->txbucket;
	uint32_t tx_cycles = 0;
	uint32_t mflags;
	int vcpu = xlr_cpu_id();
	int rv;

	tx_cycles = mips_rd_count();

	if (build_frag_list(m, &msg, tx_desc) != 0)
		return MAC_TX_FAIL;

	else {
		/* Message-ring access must be enabled around the send. */
		mflags = msgrng_access_enable();
		if ((rv = message_send(1, MSGRNG_CODE_MAC, stid, &msg)) != 0) {
			msg_snd_failed++;
			msgrng_restore(mflags);
			/* Give the descriptor back; the mbuf is not freed. */
			release_tx_desc(&msg, 0);
			xlr_rge_msg_snd_failed[vcpu]++;
			dbg_msg("Failed packet to cpu %d, rv = %d, stid %d, msg0=%jx\n",
			    vcpu, rv, stid, (uintmax_t)msg.msg0);
			return MAC_TX_FAIL;
		}
		msgrng_restore(mflags);
		port_inc_counter(priv->instance, PORT_TX);
	}

	/* Send the packet to MAC */
	dbg_msg("Sent tx packet to stid %d, msg0=%jx, msg1=%jx \n", stid,
	    (uintmax_t)msg.msg0, (uintmax_t)msg.msg1);
#ifdef DUMP_PACKETS
	{
		int i = 0;
		unsigned char *buf = (char *)m->m_data;

		printf("Tx Packet: length=%d\n", len);
		for (i = 0; i < 64; i++) {
			if (i && (i % 16) == 0)
				printf("\n");
			printf("%02x ", buf[i]);
		}
		printf("\n");
	}
#endif
	xlr_inc_counter(NETIF_TX);
	return MAC_TX_PASS;
}
1468
1469static int
1470rmi_xlr_mac_xmit(struct mbuf *m, struct rge_softc *sc, int len, struct p2d_tx_desc *tx_desc)
1471{
1472 struct driver_data *priv = &(sc->priv);
1473 int ret = -ENOSPC;
1474
1475 dbg_msg("IN\n");
1476
1477 xlr_inc_counter(NETIF_STACK_TX);
1478
1479retry:
1480 ret = mac_xmit(m, sc, priv, len, tx_desc);
1481
1482 if (ret == MAC_TX_RETRY)
1483 goto retry;
1484
1485 dbg_msg("OUT, ret = %d\n", ret);
1486 if (ret == MAC_TX_FAIL) {
1487 /* FULL */
1488 dbg_msg("Msg Ring Full. Stopping upper layer Q\n");
1489 port_inc_counter(priv->instance, PORT_STOPQ);
1490 }
1491 return ret;
1492}
1493
/*
 * Replenish the Rx free-in rings: for every MAC that owes buffers on
 * this core (frin_to_be_sent[cpu] > 0), allocate a buffer and hand it
 * to the MAC, decrementing the debt.  Loops until every MAC is either
 * absent, fully replenished, or stalled (no buffer / send failure).
 */
static void
mac_frin_replenish(void *args /* ignored */ )
{
	int cpu = xlr_core_id();
	int done = 0;
	int i = 0;

	xlr_inc_counter(REPLENISH_ENTER);
	/*
	 * xlr_set_counter(REPLENISH_ENTER_COUNT,
	 * atomic_read(frin_to_be_sent));
	 */
	xlr_set_counter(REPLENISH_CPU, PCPU_GET(cpuid));

	for (;;) {

		done = 0;

		for (i = 0; i < XLR_MAX_MACS; i++) {
			/* int offset = 0; */
			void *m;
			uint32_t cycles;
			struct rge_softc *sc;
			struct driver_data *priv;
			int frin_to_be_sent;

			sc = dev_mac[i];
			if (!sc)
				goto skip;

			priv = &(sc->priv);
			frin_to_be_sent = priv->frin_to_be_sent[cpu];

			/* if (atomic_read(frin_to_be_sent) < 0) */
			if (frin_to_be_sent < 0) {
				panic("BUG?: [%s]: gmac_%d illegal value for frin_to_be_sent=%d\n",
				    __FUNCTION__, i,
				    frin_to_be_sent);
			}
			/* if (!atomic_read(frin_to_be_sent)) */
			if (!frin_to_be_sent)
				goto skip;

			cycles = mips_rd_count();
			{
				m = get_buf();
				if (!m) {
					device_printf(sc->rge_dev, "No buffer\n");
					goto skip;
				}
			}
			xlr_inc_counter(REPLENISH_FRIN);
			/* Hand the free buffer's physical address to the MAC. */
			if (xlr_mac_send_fr(priv, vtophys(m), MAX_FRAME_SIZE)) {
				free_buf(vtophys(m));
				printf("[%s]: rx free message_send failed!\n", __FUNCTION__);
				break;
			}
			xlr_set_counter(REPLENISH_CYCLES,
			    (read_c0_count() - cycles));
			atomic_subtract_int((&priv->frin_to_be_sent[cpu]), 1);

			continue;
	skip:
			done++;
		}
		/* Every MAC was skipped this pass: nothing left to do. */
		if (done == XLR_MAX_MACS)
			break;
	}
}
1563
1564static volatile uint32_t g_tx_frm_tx_ok=0;
1565
1566static void
1567rge_tx_bkp_func(void *arg, int npending)
1568{
1569 int i = 0;
1570
1571 for (i = 0; i < xlr_board_info.gmacports; i++) {
1572 if (!dev_mac[i] || !dev_mac[i]->active)
1573 continue;
1574 rge_start_locked(dev_mac[i]->rge_ifp, RGE_TX_THRESHOLD);
1575 }
1576 atomic_subtract_int(&g_tx_frm_tx_ok, 1);
1577}
1578
1579/* This function is called from an interrupt handler */
/*
 * Message-ring handler (interrupt context) for MAC messages.  A
 * zero-length message is a Tx-done free-back: release the descriptor,
 * clear OACTIVE and kick the backup transmit task.  A non-zero length
 * is a received frame: account for it, bump the per-core free-in debt
 * (replenishing inline past the threshold) and pass it to rge_rx().
 */
void
rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
    int stid, struct msgrng_msg *msg,
    void *data /* ignored */ )
{
	uint64_t phys_addr = 0;
	unsigned long addr = 0;
	uint32_t length = 0;
	int ctrl = 0, port = 0;
	struct rge_softc *sc = NULL;
	struct driver_data *priv = 0;
	struct ifnet *ifp;
	int vcpu = xlr_cpu_id();
	int cpu = xlr_core_id();

	dbg_msg("mac: bucket=%d, size=%d, code=%d, stid=%d, msg0=%jx msg1=%jx\n",
	    bucket, size, code, stid, (uintmax_t)msg->msg0, (uintmax_t)msg->msg1);

	/* Decode msg0: 32-byte-aligned buffer address, length, port. */
	phys_addr = (uint64_t) (msg->msg0 & 0xffffffffe0ULL);
	length = (msg->msg0 >> 40) & 0x3fff;
	if (length == 0) {
		/* Length 0 means a Tx free-back message. */
		ctrl = CTRL_REG_FREE;
		port = (msg->msg0 >> 54) & 0x0f;
		addr = 0;
	} else {
		ctrl = CTRL_SNGL;
		/* Strip the Rx byte offset and trailing CRC from the length. */
		length = length - BYTE_OFFSET - MAC_CRC_LEN;
		port = msg->msg0 & 0x0f;
		addr = 0;
	}

	/* Map (station id, port) to the owning softc. */
	if (xlr_board_info.is_xls) {
		if (stid == MSGRNG_STNID_GMAC1)
			port += 4;
		sc = dev_mac[dev_mac_gmac0 + port];
	} else {
		if (stid == MSGRNG_STNID_XGS0FR)
			sc = dev_mac[dev_mac_xgs0];
		else if (stid == MSGRNG_STNID_XGS1FR)
			sc = dev_mac[dev_mac_xgs0 + 1];
		else
			sc = dev_mac[dev_mac_gmac0 + port];
	}
	if (sc == NULL)
		return;
	priv = &(sc->priv);

	dbg_msg("msg0 = %jx, stid = %d, port = %d, addr=%lx, length=%d, ctrl=%d\n",
	    (uintmax_t)msg->msg0, stid, port, addr, length, ctrl);

	if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
		/* Tx completion: free the descriptor and restart the queue. */
		xlr_rge_tx_ok_done[vcpu]++;
		release_tx_desc(msg, 1);
		ifp = sc->rge_ifp;
		if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		/* Only one dispatcher at a time runs the backup task. */
		if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
			rge_tx_bkp_func(NULL, 0);
		xlr_set_counter(NETIF_TX_COMPLETE_CYCLES,
		    (read_c0_count() - msgrng_msg_cycles));
	} else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
		/* Rx Packet */
		/* struct mbuf *m = 0; */
		/* int logical_cpu = 0; */

		dbg_msg("Received packet, port = %d\n", port);
		/*
		 * if num frins to be sent exceeds threshold, wake up the
		 * helper thread
		 */
		atomic_add_int(&(priv->frin_to_be_sent[cpu]), 1);
		if ((priv->frin_to_be_sent[cpu]) > MAC_FRIN_TO_BE_SENT_THRESHOLD) {
			mac_frin_replenish(NULL);
		}
		dbg_msg("gmac_%d: rx packet: phys_addr = %jx, length = %x\n",
		    priv->instance, (uintmax_t)phys_addr, length);
		mac_stats_add(priv->stats.rx_packets, 1);
		mac_stats_add(priv->stats.rx_bytes, length);
		xlr_inc_counter(NETIF_RX);
		xlr_set_counter(NETIF_RX_CYCLES,
		    (read_c0_count() - msgrng_msg_cycles));
		rge_rx(sc, phys_addr, length);
		xlr_rge_rx_done[vcpu]++;
	} else {
		printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
	}

}
1669
1670/**********************************************************************
1671 **********************************************************************/
1672static int
1673rge_probe(dev)
1674 device_t dev;
1675{
1676 device_set_desc(dev, "RMI Gigabit Ethernet");
1677
1678 /* Always return 0 */
1679 return 0;
1680}
1681
/* Set non-zero (e.g. from a debugger) to enable the 1 Hz debug dump below. */
volatile unsigned long xlr_debug_enabled;
/* Callout used by xlr_debug_count() to re-arm itself once a second. */
struct callout rge_dbg_count;
1684static void
1685xlr_debug_count(void *addr)
1686{
1687 struct driver_data *priv = &dev_mac[0]->priv;
1688
1689 /* uint32_t crdt; */
1690 if (xlr_debug_enabled) {
1691 printf("\nAvailRxIn %#x\n", xlr_read_reg(priv->mmio, 0x23e));
1692 }
1693 callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1694}
1695
1696
1697static void
1698xlr_tx_q_wakeup(void *addr)
1699{
1700 int i = 0;
1701 int j = 0;
1702
1703 for (i = 0; i < xlr_board_info.gmacports; i++) {
1704 if (!dev_mac[i] || !dev_mac[i]->active)
1705 continue;
1706 if ((dev_mac[i]->rge_ifp->if_drv_flags) & IFF_DRV_OACTIVE) {
1707 for (j = 0; j < XLR_MAX_CORE; j++) {
1708 if (xlr_tot_avail_p2d[j]) {
1709 dev_mac[i]->rge_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1710 break;
1711 }
1712 }
1713 }
1714 }
1715 if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
1716 rge_tx_bkp_func(NULL, 0);
1717 callout_reset(&xlr_tx_stop_bkp, 5 * hz, xlr_tx_q_wakeup, NULL);
1718}
1719
1720static int
1721rge_attach(device_t dev)
1722{
1723 struct ifnet *ifp;
1724 struct rge_softc *sc;
1725 struct driver_data *priv = 0;
1726 int ret = 0;
1727 struct xlr_gmac_block_t *gmac_conf = device_get_ivars(dev);
1728
1729 sc = device_get_softc(dev);
1730 sc->rge_dev = dev;
1731
1732 /* Initialize mac's */
1733 sc->unit = device_get_unit(dev);
1734
1735 if (sc->unit > XLR_MAX_MACS) {
1736 ret = ENXIO;
1737 goto out;
1738 }
1739 RGE_LOCK_INIT(sc, device_get_nameunit(dev));
1740
1741 priv = &(sc->priv);
1742 priv->sc = sc;
1743
1744 sc->flags = 0; /* TODO : fix me up later */
1745
1746 priv->id = sc->unit;
1747 if (gmac_conf->type == XLR_GMAC) {
1748 priv->instance = priv->id;
1749 priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr +
1750 0x1000 * (sc->unit % 4));
1751 if ((ret = rmi_xlr_gmac_reset(priv)) == -1)
1752 goto out;
1753 } else if (gmac_conf->type == XLR_XGMAC) {
1754 priv->instance = priv->id - xlr_board_info.gmacports;
1755 priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1756 }
1757 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_VI ||
1758 (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI &&
1759 priv->instance >=4)) {
1760 dbg_msg("Arizona board - offset 4 \n");
1761 priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_4_OFFSET);
1762 } else
1763 priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1764
1765 priv->pcs_mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1766 priv->serdes_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1767
1768 sc->base_addr = (unsigned long)priv->mmio;
1769 sc->mem_end = (unsigned long)priv->mmio + XLR_IO_SIZE - 1;
1770
1771 sc->xmit = rge_start;
1772 sc->stop = rge_stop;
1773 sc->get_stats = rmi_xlr_mac_get_stats;
1774 sc->ioctl = rge_ioctl;
1775
1776 /* Initialize the device specific driver data */
1777 mtx_init(&priv->lock, "rge", NULL, MTX_SPIN);
1778
1779 priv->type = gmac_conf->type;
1780
1781 priv->mode = gmac_conf->mode;
1782 if (xlr_board_info.is_xls == 0) {
1783 /* TODO - check II and IIB boards */
1784 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_II &&
1785 xlr_boot1_info.board_minor_version != 1)
1786 priv->phy_addr = priv->instance - 2;
1787 else
1788 priv->phy_addr = priv->instance;
1789 priv->mode = XLR_RGMII;
1790 } else {
1791 if (gmac_conf->mode == XLR_PORT0_RGMII &&
1792 priv->instance == 0) {
1793 priv->mode = XLR_PORT0_RGMII;
1794 priv->phy_addr = 0;
1795 } else {
1796 priv->mode = XLR_SGMII;
1797 /* Board 11 has SGMII daughter cards with the XLS chips, in this case
1798 the phy number is 0-3 for both GMAC blocks */
1799 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI)
1800 priv->phy_addr = priv->instance % 4 + 16;
1801 else
1802 priv->phy_addr = priv->instance + 16;
1803 }
1804 }
1805
1806 priv->txbucket = gmac_conf->station_txbase + priv->instance % 4;
1807 priv->rfrbucket = gmac_conf->station_rfr;
1808 priv->spill_configured = 0;
1809
1810 dbg_msg("priv->mmio=%p\n", priv->mmio);
1811
1812 /* Set up ifnet structure */
1813 ifp = sc->rge_ifp = if_alloc(IFT_ETHER);
1814 if (ifp == NULL) {
1815 device_printf(sc->rge_dev, "failed to if_alloc()\n");
1816 rge_release_resources(sc);
1817 ret = ENXIO;
1818 RGE_LOCK_DESTROY(sc);
1819 goto out;
1820 }
1821 ifp->if_softc = sc;
1822 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1823 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1824 ifp->if_ioctl = rge_ioctl;
1825 ifp->if_start = rge_start;
1826 ifp->if_init = rge_init;
1827 ifp->if_mtu = ETHERMTU;
1828 ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1829 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1830 IFQ_SET_READY(&ifp->if_snd);
1831 sc->active = 1;
1832 ifp->if_hwassist = 0;
1833 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;
1834 ifp->if_capenable = ifp->if_capabilities;
1835
1836 /* Initialize the rge_softc */
1837 sc->irq = gmac_conf->baseirq + priv->instance % 4;
1838
1839 /* Set the IRQ into the rid field */
1840 /*
1841 * note this is a hack to pass the irq to the iodi interrupt setup
1842 * routines
1843 */
1844 sc->rge_irq.__r_i = (struct resource_i *)(intptr_t)sc->irq;
1845
1846 ret = bus_setup_intr(dev, &sc->rge_irq, INTR_FAST | INTR_TYPE_NET | INTR_MPSAFE,
1847 NULL, rge_intr, sc, &sc->rge_intrhand);
1848
1849 if (ret) {
1850 rge_detach(dev);
1851 device_printf(sc->rge_dev, "couldn't set up irq\n");
1852 RGE_LOCK_DESTROY(sc);
1853 goto out;
1854 }
1855 xlr_mac_get_hwaddr(sc);
1856 xlr_mac_setup_hwaddr(priv);
1857
1858 dbg_msg("MMIO %08lx, MII %08lx, PCS %08lx, base %08lx PHY %d IRQ %d\n",
1859 (u_long)priv->mmio, (u_long)priv->mii_mmio, (u_long)priv->pcs_mmio,
1860 (u_long)sc->base_addr, priv->phy_addr, sc->irq);
1861 dbg_msg("HWADDR %02x:%02x tx %d rfr %d\n", (u_int)sc->dev_addr[4],
1862 (u_int)sc->dev_addr[5], priv->txbucket, priv->rfrbucket);
1863
1864 /*
1865 * Set up ifmedia support.
1866 */
1867 /*
1868 * Initialize MII/media info.
1869 */
1870 sc->rge_mii.mii_ifp = ifp;
1871 sc->rge_mii.mii_readreg = rge_mii_read;
1872 sc->rge_mii.mii_writereg = (mii_writereg_t) rge_mii_write;
1873 sc->rge_mii.mii_statchg = rmi_xlr_mac_mii_statchg;
1874 ifmedia_init(&sc->rge_mii.mii_media, 0, rmi_xlr_mac_mediachange,
1875 rmi_xlr_mac_mediastatus);
1876 ifmedia_add(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1877 ifmedia_set(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1878 sc->rge_mii.mii_media.ifm_media = sc->rge_mii.mii_media.ifm_cur->ifm_media;
1879
1880 /*
1881 * Call MI attach routine.
1882 */
1883 ether_ifattach(ifp, sc->dev_addr);
1884
1885 if (priv->type == XLR_GMAC) {
1886 rmi_xlr_gmac_init(priv);
1887 } else if (priv->type == XLR_XGMAC) {
1888 rmi_xlr_xgmac_init(priv);
1889 }
1890 dbg_msg("rge_%d: Phoenix Mac at 0x%p (mtu=%d)\n",
1891 sc->unit, priv->mmio, sc->mtu);
1892 dev_mac[sc->unit] = sc;
1893 if (priv->type == XLR_XGMAC && priv->instance == 0)
1894 dev_mac_xgs0 = sc->unit;
1895 if (priv->type == XLR_GMAC && priv->instance == 0)
1896 dev_mac_gmac0 = sc->unit;
1897
1898 if (!gmac_common_init_done) {
1899 mac_common_init();
1900 gmac_common_init_done = 1;
1901 callout_init(&xlr_tx_stop_bkp, CALLOUT_MPSAFE);
1902 callout_reset(&xlr_tx_stop_bkp, hz, xlr_tx_q_wakeup, NULL);
1903 callout_init(&rge_dbg_count, CALLOUT_MPSAFE);
1904 //callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1905 }
1906 if ((ret = rmi_xlr_mac_open(sc)) == -1) {
1907 RGE_LOCK_DESTROY(sc);
1908 goto out;
1909 }
1910out:
1911 if (ret < 0) {
1912 device_printf(dev, "error - skipping\n");
1913 }
1914 return ret;
1915}
1916
/*
 * Hardware reset hook; currently a no-op — the MAC is (re)initialized
 * through rmi_xlr_mac_open()/rge_init() instead.
 */
static void
rge_reset(struct rge_softc *sc)
{
}
1921
1922static int
1923rge_detach(dev)
1924 device_t dev;
1925{
1926#ifdef FREEBSD_MAC_NOT_YET
1927 struct rge_softc *sc;
1928 struct ifnet *ifp;
1929
1930 sc = device_get_softc(dev);
1931 ifp = sc->rge_ifp;
1932
1933 RGE_LOCK(sc);
1934 rge_stop(sc);
1935 rge_reset(sc);
1936 RGE_UNLOCK(sc);
1937
1938 ether_ifdetach(ifp);
1939
1940 if (sc->rge_tbi) {
1941 ifmedia_removeall(&sc->rge_ifmedia);
1942 } else {
1943 bus_generic_detach(dev);
1944 device_delete_child(dev, sc->rge_miibus);
1945 }
1946
1947 rge_release_resources(sc);
1948
1949#endif /* FREEBSD_MAC_NOT_YET */
1950 return (0);
1951}
1952static int
1953rge_suspend(device_t dev)
1954{
1955 struct rge_softc *sc;
1956
1957 sc = device_get_softc(dev);
1958 RGE_LOCK(sc);
1959 rge_stop(sc);
1960 RGE_UNLOCK(sc);
1961
1962 return 0;
1963}
1964
1965static int
1966rge_resume(device_t dev)
1967{
1968 panic("rge_resume(): unimplemented\n");
1969 return 0;
1970}
1971
1972static void
1973rge_release_resources(struct rge_softc *sc)
1974{
1975
1976 if (sc->rge_ifp != NULL)
1977 if_free(sc->rge_ifp);
1978
1979 if (mtx_initialized(&sc->rge_mtx)) /* XXX */
1980 RGE_LOCK_DESTROY(sc);
1981}
/* Per-CPU RX diagnostic counters (not updated anywhere in this chunk). */
uint32_t gmac_rx_fail[32];
uint32_t gmac_rx_pass[32];
1984
/*
 * Hand a received frame to the network stack.
 *
 * 'paddr' is the physical address of the packet data.  The mbuf pointer
 * and a magic tag (0xf00bad) were stored one cache line before the
 * buffer when it was posted, so both are loaded back from
 * paddr - XLR_CACHELINE_SIZE.  A frame whose tag does not match is
 * logged and dropped (its mbuf is not recovered here).
 */
static void
rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
{
	struct mbuf *m;
	struct ifnet *ifp = sc->rge_ifp;
	uint64_t mag;
	uint32_t sr;
	/*
	 * On 32 bit machines we use XKPHYS to get the values stores with
	 * the mbuf, need to explicitly enable KX. Disable interrupts while
	 * KX is enabled to prevent this setting leaking to other code.
	 */
	sr = xlr_enable_kx();
	m = (struct mbuf *)(intptr_t)xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
	mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
	xlr_restore_kx(sr);
	if (mag != 0xf00bad) {
		/* somebody else packet Error - FIXME in intialization */
		printf("cpu %d: *ERROR* Not my packet paddr %p\n",
		    xlr_cpu_id(), (void *)paddr);
		return;
	}
	/* align the data */
	m->m_data += BYTE_OFFSET;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = ifp;

#ifdef DUMP_PACKETS
	{
		int i = 0;
		unsigned char *buf = (char *)m->m_data;

		printf("Rx Packet: length=%d\n", len);
		for (i = 0; i < 64; i++) {
			if (i && (i % 16) == 0)
				printf("\n");
			printf("%02x ", buf[i]);
		}
		printf("\n");
	}
#endif
	/* Count the packet and pass it up the stack. */
	ifp->if_ipackets++;
	(*ifp->if_input) (ifp, m);
}
2029
/*
 * MAC error/MDIO interrupt handler.
 *
 * An MDInt cause means a PHY raised its interrupt line: poll the status
 * register (reg 26) of every GMAC PHY and re-evaluate link speed.  Any
 * other cause is logged as an error.  All causes are then acknowledged.
 * On pre-rev-2 parts the XGMAC-0 interrupt register is also checked and
 * cleared from here (see the routing note below).
 */
static void
rge_intr(void *arg)
{
	struct rge_softc *sc = (struct rge_softc *)arg;
	struct driver_data *priv = &(sc->priv);
	xlr_reg_t *mmio = priv->mmio;
	uint32_t intreg = xlr_read_reg(mmio, R_INTREG);

	if (intreg & (1 << O_INTREG__MDInt)) {
		uint32_t phy_int_status = 0;
		int i = 0;

		/* Scan every registered GMAC; XGMACs have no MII PHY here. */
		for (i = 0; i < XLR_MAX_MACS; i++) {
			struct rge_softc *phy_dev = 0;
			struct driver_data *phy_priv = 0;

			phy_dev = dev_mac[i];
			if (phy_dev == NULL)
				continue;

			phy_priv = &phy_dev->priv;

			if (phy_priv->type == XLR_XGMAC)
				continue;

			/* Reading register 26 also clears the PHY interrupt. */
			phy_int_status = rge_mii_read_internal(phy_priv->mii_mmio,
			    phy_priv->phy_addr, 26);
			printf("rge%d: Phy addr %d, MII MMIO %lx status %x\n", phy_priv->instance,
			    (int)phy_priv->phy_addr, (u_long)phy_priv->mii_mmio, phy_int_status);
			rmi_xlr_gmac_config_speed(phy_priv);
		}
	} else {
		printf("[%s]: mac type = %d, instance %d error "
		    "interrupt: INTREG = 0x%08x\n",
		    __FUNCTION__, priv->type, priv->instance, intreg);
	}

	/* clear all interrupts and hope to make progress */
	xlr_write_reg(mmio, R_INTREG, 0xffffffff);

	/* (not yet) on A0 and B0, xgmac interrupts are routed only to xgs_1 irq */
	if ((xlr_revision() < 2) && (priv->type == XLR_XGMAC)) {
		struct rge_softc *xgs0_dev = dev_mac[dev_mac_xgs0];
		struct driver_data *xgs0_priv = &xgs0_dev->priv;
		xlr_reg_t *xgs0_mmio = xgs0_priv->mmio;
		uint32_t xgs0_intreg = xlr_read_reg(xgs0_mmio, R_INTREG);

		if (xgs0_intreg) {
			printf("[%s]: mac type = %d, instance %d error "
			    "interrupt: INTREG = 0x%08x\n",
			    __FUNCTION__, xgs0_priv->type, xgs0_priv->instance, xgs0_intreg);

			xlr_write_reg(xgs0_mmio, R_INTREG, 0xffffffff);
		}
	}
}
2086
/*
 * Drain the interface send queue onto the hardware.
 *
 * At most xlr_tot_avail_p2d[core] packets are sent per call, one P2D
 * descriptor each.  If the MAC refuses a packet it is prepended back
 * onto the queue and IFF_DRV_OACTIVE is set so the stack stops handing
 * us packets until a TX completion clears it.
 * The 'threshold' parameter is currently unused.
 */
static void
rge_start_locked(struct ifnet *ifp, int threshold)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	int prepend_pkt = 0;
	int i = 0;
	struct p2d_tx_desc *tx_desc = NULL;
	int cpu = xlr_core_id();
	uint32_t vcpu = xlr_cpu_id();

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (i = 0; i < xlr_tot_avail_p2d[cpu]; i++) {
		if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			return;
		tx_desc = get_p2d_desc();
		if (!tx_desc) {
			xlr_rge_get_p2d_failed[vcpu]++;
			return;
		}
		/* Grab a packet off the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			free_p2d_desc(tx_desc);
			return;
		}
		prepend_pkt = rmi_xlr_mac_xmit(m, sc, 0, tx_desc);

		if (prepend_pkt) {
			/* MAC refused the packet: requeue it and stall TX. */
			xlr_rge_tx_prepend[vcpu]++;
			IF_PREPEND(&ifp->if_snd, m);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			return;
		} else {
			ifp->if_opackets++;
			xlr_rge_tx_done[vcpu]++;
		}
	}
}
2128
2129static void
2130rge_start(struct ifnet *ifp)
2131{
2132 rge_start_locked(ifp, RGE_TX_Q_SIZE);
2133}
2134
2135static int
2136rge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2137{
2138 struct rge_softc *sc = ifp->if_softc;
2139 struct ifreq *ifr = (struct ifreq *)data;
2140 int mask, error = 0;
2141
2142 /* struct mii_data *mii; */
2143 switch (command) {
2144 case SIOCSIFMTU:
2145 ifp->if_mtu = ifr->ifr_mtu;
2146 error = rmi_xlr_mac_change_mtu(sc, ifr->ifr_mtu);
2147 break;
2148 case SIOCSIFFLAGS:
2149
2150 RGE_LOCK(sc);
2151 if (ifp->if_flags & IFF_UP) {
2152 /*
2153 * If only the state of the PROMISC flag changed,
2154 * then just use the 'set promisc mode' command
2155 * instead of reinitializing the entire NIC. Doing a
2156 * full re-init means reloading the firmware and
2157 * waiting for it to start up, which may take a
2158 * second or two. Similarly for ALLMULTI.
2159 */
2160 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2161 ifp->if_flags & IFF_PROMISC &&
2162 !(sc->flags & IFF_PROMISC)) {
2163 sc->flags |= IFF_PROMISC;
2164 xlr_mac_set_rx_mode(sc);
2165 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2166 !(ifp->if_flags & IFF_PROMISC) &&
2167 sc->flags & IFF_PROMISC) {
2168 sc->flags &= IFF_PROMISC;
2169 xlr_mac_set_rx_mode(sc);
2170 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2171 (ifp->if_flags ^ sc->flags) & IFF_ALLMULTI) {
2172 rmi_xlr_mac_set_multicast_list(sc);
2173 } else
2174 xlr_mac_set_rx_mode(sc);
2175 } else {
2176 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2177 xlr_mac_set_rx_mode(sc);
2178 }
2179 }
2180 sc->flags = ifp->if_flags;
2181 RGE_UNLOCK(sc);
2182 error = 0;
2183 break;
2184 case SIOCADDMULTI:
2185 case SIOCDELMULTI:
2186 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2187 RGE_LOCK(sc);
2188 rmi_xlr_mac_set_multicast_list(sc);
2189 RGE_UNLOCK(sc);
2190 error = 0;
2191 }
2192 break;
2193 case SIOCSIFMEDIA:
2194 case SIOCGIFMEDIA:
2195 error = ifmedia_ioctl(ifp, ifr,
2196 &sc->rge_mii.mii_media, command);
2197 break;
2198 case SIOCSIFCAP:
2199 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2200 ifp->if_hwassist = 0;
2201 break;
2202 default:
2203 error = ether_ioctl(ifp, command, data);
2204 break;
2205 }
2206
2207 return (error);
2208}
2209
2210static void
2211rge_init(void *addr)
2212{
2213 struct rge_softc *sc = (struct rge_softc *)addr;
2214 struct ifnet *ifp;
2215 struct driver_data *priv = &(sc->priv);
2216
2217 ifp = sc->rge_ifp;
2218
2219 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2220 return;
2221 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2222 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2223
2224 rmi_xlr_mac_set_enable(priv, 1);
2225}
2226
/*
 * Stop the interface by closing the underlying MAC.
 */
static void
rge_stop(struct rge_softc *sc)
{
	rmi_xlr_mac_close(sc);
}
2232
2233static int
2234rge_shutdown(device_t dev)
2235{
2236 struct rge_softc *sc;
2237
2238 sc = device_get_softc(dev);
2239
2240 RGE_LOCK(sc);
2241 rge_stop(sc);
2242 rge_reset(sc);
2243 RGE_UNLOCK(sc);
2244
2245 return (0);
2246}
2247
2248static int
2249rmi_xlr_mac_open(struct rge_softc *sc)
2250{
2251 struct driver_data *priv = &(sc->priv);
2252 int i;
2253
2254 dbg_msg("IN\n");
2255
2256 if (rmi_xlr_mac_fill_rxfr(sc)) {
2257 return -1;
2258 }
2259 mtx_lock_spin(&priv->lock);
2260
2261 xlr_mac_set_rx_mode(sc);
2262
2263 if (sc->unit == xlr_board_info.gmacports - 1) {
2264 printf("Enabling MDIO interrupts\n");
2265 struct rge_softc *tmp = NULL;
2266
2267 for (i = 0; i < xlr_board_info.gmacports; i++) {
2268 tmp = dev_mac[i];
2269 if (tmp)
2270 xlr_write_reg(tmp->priv.mmio, R_INTMASK,
2271 ((tmp->priv.instance == 0) << O_INTMASK__MDInt));
2272 }
2273 }
2274 /*
2275 * Configure the speed, duplex, and flow control
2276 */
2277 rmi_xlr_mac_set_speed(priv, priv->speed);
2278 rmi_xlr_mac_set_duplex(priv, priv->duplex, priv->flow_ctrl);
2279 rmi_xlr_mac_set_enable(priv, 0);
2280
2281 mtx_unlock_spin(&priv->lock);
2282
2283 for (i = 0; i < 8; i++) {
2284 atomic_set_int(&(priv->frin_to_be_sent[i]), 0);
2285 }
2286
2287 return 0;
2288}
2289
2290/**********************************************************************
2291 **********************************************************************/
2292static int
2293rmi_xlr_mac_close(struct rge_softc *sc)
2294{
2295 struct driver_data *priv = &(sc->priv);
2296
2297 mtx_lock_spin(&priv->lock);
2298
2299 /*
2300 * There may have left over mbufs in the ring as well as in free in
2301 * they will be reused next time open is called
2302 */
2303
2304 rmi_xlr_mac_set_enable(priv, 0);
2305
2306 xlr_inc_counter(NETIF_STOP_Q);
2307 port_inc_counter(priv->instance, PORT_STOPQ);
2308
2309 mtx_unlock_spin(&priv->lock);
2310
2311 return 0;
2312}
2313
2314/**********************************************************************
2315 **********************************************************************/
2316static struct rge_softc_stats *
2317rmi_xlr_mac_get_stats(struct rge_softc *sc)
2318{
2319 struct driver_data *priv = &(sc->priv);
2320
2321 /* unsigned long flags; */
2322
2323 mtx_lock_spin(&priv->lock);
2324
2325 /* XXX update other stats here */
2326
2327 mtx_unlock_spin(&priv->lock);
2328
2329 return &priv->stats;
2330}
2331
2332/**********************************************************************
2333 **********************************************************************/
/*
 * Program the hardware multicast filter.  Intentionally empty: the RX
 * filter is set up elsewhere (see xlr_mac_setup_hwaddr(), which enables
 * ALL_MCAST), so there is nothing per-group to program here.
 */
static void
rmi_xlr_mac_set_multicast_list(struct rge_softc *sc)
{
}
2338
2339/**********************************************************************
2340 **********************************************************************/
2341static int
2342rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu)
2343{
2344 struct driver_data *priv = &(sc->priv);
2345
2346 if ((new_mtu > 9500) || (new_mtu < 64)) {
2347 return -EINVAL;
2348 }
2349 mtx_lock_spin(&priv->lock);
2350
2351 sc->mtu = new_mtu;
2352
2353 /* Disable MAC TX/RX */
2354 rmi_xlr_mac_set_enable(priv, 0);
2355
2356 /* Flush RX FR IN */
2357 /* Flush TX IN */
2358 rmi_xlr_mac_set_enable(priv, 1);
2359
2360 mtx_unlock_spin(&priv->lock);
2361 return 0;
2362}
2363
2364/**********************************************************************
2365 **********************************************************************/
2366static int
2367rmi_xlr_mac_fill_rxfr(struct rge_softc *sc)
2368{
2369 struct driver_data *priv = &(sc->priv);
2370 int i;
2371 int ret = 0;
2372 void *ptr;
2373
2374 dbg_msg("\n");
2375 if (!priv->init_frin_desc)
2376 return ret;
2377 priv->init_frin_desc = 0;
2378
2379 dbg_msg("\n");
2380 for (i = 0; i < MAX_NUM_DESC; i++) {
2381 ptr = get_buf();
2382 if (!ptr) {
2383 ret = -ENOMEM;
2384 break;
2385 }
2386 /* Send the free Rx desc to the MAC */
2387 xlr_mac_send_fr(priv, vtophys(ptr), MAX_FRAME_SIZE);
2388 }
2389
2390 return ret;
2391}
2392
2393/**********************************************************************
2394 **********************************************************************/
/*
 * Allocate a cache-line aligned spill area and program its physical
 * base (split across two registers at 32-byte granularity) and its size
 * into the given MAC registers.  Panics if the allocation fails or is
 * misaligned.  Returns the kernel virtual address of the spill area.
 */
static __inline__ void *
rmi_xlr_config_spill(xlr_reg_t * mmio,
    int reg_start_0, int reg_start_1,
    int reg_size, int size)
{
	uint32_t spill_size = size;
	void *spill = NULL;
	uint64_t phys_addr = 0;


	spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
	if (!spill || ((vm_offset_t)spill & (XLR_CACHELINE_SIZE - 1))) {
		panic("Unable to allocate memory for spill area!\n");
	}
	phys_addr = vtophys(spill);
	dbg_msg("Allocate spill %d bytes at %jx\n", size, (uintmax_t)phys_addr);
	/* Base is programmed as addr >> 5 (low 32 bits) plus 3 high bits. */
	xlr_write_reg(mmio, reg_start_0, (phys_addr >> 5) & 0xffffffff);
	xlr_write_reg(mmio, reg_start_1, (phys_addr >> 37) & 0x07);
	xlr_write_reg(mmio, reg_size, spill_size);

	return spill;
}
2418
/*
 * Allocate and program all message-ring spill areas for this MAC block:
 * free-in, the four RX classes, and free-out.  Only the first port of a
 * 4-port GMAC block owns the spill areas; other ports return early.
 *
 * NOTE(review): spill_configured is stored both before and after the
 * allocations; the early store is the one that guards re-entry (it is
 * unsynchronized — see the comment below).
 */
static void
rmi_xlr_config_spill_area(struct driver_data *priv)
{
	/*
	 * if driver initialization is done parallely on multiple cpus
	 * spill_configured needs synchronization
	 */
	if (priv->spill_configured)
		return;

	if (priv->type == XLR_GMAC && priv->instance % 4 != 0) {
		priv->spill_configured = 1;
		return;
	}
	priv->spill_configured = 1;

	priv->frin_spill =
	    rmi_xlr_config_spill(priv->mmio,
	    R_REG_FRIN_SPILL_MEM_START_0,
	    R_REG_FRIN_SPILL_MEM_START_1,
	    R_REG_FRIN_SPILL_MEM_SIZE,
	    MAX_FRIN_SPILL *
	    sizeof(struct fr_desc));

	priv->class_0_spill =
	    rmi_xlr_config_spill(priv->mmio,
	    R_CLASS0_SPILL_MEM_START_0,
	    R_CLASS0_SPILL_MEM_START_1,
	    R_CLASS0_SPILL_MEM_SIZE,
	    MAX_CLASS_0_SPILL *
	    sizeof(union rx_tx_desc));
	priv->class_1_spill =
	    rmi_xlr_config_spill(priv->mmio,
	    R_CLASS1_SPILL_MEM_START_0,
	    R_CLASS1_SPILL_MEM_START_1,
	    R_CLASS1_SPILL_MEM_SIZE,
	    MAX_CLASS_1_SPILL *
	    sizeof(union rx_tx_desc));

	priv->frout_spill =
	    rmi_xlr_config_spill(priv->mmio, R_FROUT_SPILL_MEM_START_0,
	    R_FROUT_SPILL_MEM_START_1,
	    R_FROUT_SPILL_MEM_SIZE,
	    MAX_FROUT_SPILL *
	    sizeof(struct fr_desc));

	priv->class_2_spill =
	    rmi_xlr_config_spill(priv->mmio,
	    R_CLASS2_SPILL_MEM_START_0,
	    R_CLASS2_SPILL_MEM_START_1,
	    R_CLASS2_SPILL_MEM_SIZE,
	    MAX_CLASS_2_SPILL *
	    sizeof(union rx_tx_desc));
	priv->class_3_spill =
	    rmi_xlr_config_spill(priv->mmio,
	    R_CLASS3_SPILL_MEM_START_0,
	    R_CLASS3_SPILL_MEM_START_1,
	    R_CLASS3_SPILL_MEM_SIZE,
	    MAX_CLASS_3_SPILL *
	    sizeof(union rx_tx_desc));
	priv->spill_configured = 1;
}
2481
2482/*****************************************************************
2483 * Write the MAC address to the XLR registers
2484 * All 4 addresses are the same for now
2485 *****************************************************************/
2486static void
2487xlr_mac_setup_hwaddr(struct driver_data *priv)
2488{
2489 struct rge_softc *sc = priv->sc;
2490
2491 xlr_write_reg(priv->mmio, R_MAC_ADDR0,
2492 ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16)
2493 | (sc->dev_addr[3] << 8) | (sc->dev_addr[2]))
2494 );
2495
2496 xlr_write_reg(priv->mmio, R_MAC_ADDR0 + 1,
2497 ((sc->dev_addr[1] << 24) | (sc->
2498 dev_addr[0] << 16)));
2499
2500 xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2, 0xffffffff);
2501
2502 xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
2503
2504 xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3, 0xffffffff);
2505
2506 xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
2507
2508 xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG,
2509 (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
2510 (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
2511 (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)
2512 );
2513}
2514
2515/*****************************************************************
2516 * Read the MAC address from the XLR registers
2517 * All 4 addresses are the same for now
2518 *****************************************************************/
2519static void
2520xlr_mac_get_hwaddr(struct rge_softc *sc)
2521{
2522 struct driver_data *priv = &(sc->priv);
2523
2524 sc->dev_addr[0] = (xlr_boot1_info.mac_addr >> 40) & 0xff;
2525 sc->dev_addr[1] = (xlr_boot1_info.mac_addr >> 32) & 0xff;
2526 sc->dev_addr[2] = (xlr_boot1_info.mac_addr >> 24) & 0xff;
2527 sc->dev_addr[3] = (xlr_boot1_info.mac_addr >> 16) & 0xff;
2528 sc->dev_addr[4] = (xlr_boot1_info.mac_addr >> 8) & 0xff;
2529 sc->dev_addr[5] = ((xlr_boot1_info.mac_addr >> 0) & 0xff) + priv->instance;
2530}
2531
2532/*****************************************************************
2533 * Mac Module Initialization
2534 *****************************************************************/
2535static void
2536mac_common_init(void)
2537{
2538 init_p2d_allocation();
2539 init_tx_ring();
2540
2541 if (xlr_board_info.is_xls) {
2542 if (register_msgring_handler(TX_STN_GMAC0,
2543 rmi_xlr_mac_msgring_handler, NULL)) {
2542 if (register_msgring_handler(MSGRNG_STNID_GMAC,
2543 MSGRNG_STNID_GMAC + 1, rmi_xlr_mac_msgring_handler,
2544 NULL)) {
2544 panic("Couldn't register msgring handler\n");
2545 }
2545 panic("Couldn't register msgring handler\n");
2546 }
2546 if (register_msgring_handler(TX_STN_GMAC1,
2547 rmi_xlr_mac_msgring_handler, NULL)) {
2547 if (register_msgring_handler(MSGRNG_STNID_GMAC1,
2548 MSGRNG_STNID_GMAC1 + 1, rmi_xlr_mac_msgring_handler,
2549 NULL)) {
2548 panic("Couldn't register msgring handler\n");
2549 }
2550 } else {
2550 panic("Couldn't register msgring handler\n");
2551 }
2552 } else {
2551 if (register_msgring_handler(TX_STN_GMAC,
2552 rmi_xlr_mac_msgring_handler, NULL)) {
2553 if (register_msgring_handler(MSGRNG_STNID_GMAC,
2554 MSGRNG_STNID_GMAC + 1, rmi_xlr_mac_msgring_handler,
2555 NULL)) {
2553 panic("Couldn't register msgring handler\n");
2554 }
2555 }
2556
2557 /*
2558 * Not yet if (xlr_board_atx_ii()) { if (register_msgring_handler
2559 * (TX_STN_XGS_0, rmi_xlr_mac_msgring_handler, NULL)) {
2560 * panic("Couldn't register msgring handler for TX_STN_XGS_0\n"); }
2561 * if (register_msgring_handler (TX_STN_XGS_1,
2562 * rmi_xlr_mac_msgring_handler, NULL)) { panic("Couldn't register
2563 * msgring handler for TX_STN_XGS_1\n"); } }
2564 */
2565}
2556 panic("Couldn't register msgring handler\n");
2557 }
2558 }
2559
2560 /*
2561 * Not yet if (xlr_board_atx_ii()) { if (register_msgring_handler
2562 * (TX_STN_XGS_0, rmi_xlr_mac_msgring_handler, NULL)) {
2563 * panic("Couldn't register msgring handler for TX_STN_XGS_0\n"); }
2564 * if (register_msgring_handler (TX_STN_XGS_1,
2565 * rmi_xlr_mac_msgring_handler, NULL)) { panic("Couldn't register
2566 * msgring handler for TX_STN_XGS_1\n"); } }
2567 */
2568}