1/*-
2 * Copyright (c) 2003-2009 RMI Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * RMI_BSD
30 */
31
32#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/rmi/dev/xlr/rge.c 217072 2011-01-06 21:08:06Z jhb $");
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_device_polling.h"
37#endif
38
39#include <sys/types.h>
40#include <sys/endian.h>
41#include <sys/systm.h>
42#include <sys/sockio.h>
43#include <sys/param.h>
44#include <sys/lock.h>
45#include <sys/mutex.h>
46#include <sys/proc.h>
47#include <sys/limits.h>
48#include <sys/bus.h>
49#include <sys/mbuf.h>
50#include <sys/malloc.h>
51#include <sys/kernel.h>
52#include <sys/module.h>
53#include <sys/socket.h>
54#define __RMAN_RESOURCE_VISIBLE
55#include <sys/rman.h>
56#include <sys/taskqueue.h>
57#include <sys/smp.h>
58#include <sys/sysctl.h>
59
60#include <net/if.h>
61#include <net/if_arp.h>
62#include <net/ethernet.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65
66#include <net/bpf.h>
67#include <net/if_types.h>
68#include <net/if_vlan_var.h>
69
70#include <netinet/in_systm.h>
71#include <netinet/in.h>
72#include <netinet/ip.h>
73
74#include <vm/vm.h>
75#include <vm/pmap.h>
76
77#include <machine/reg.h>
78#include <machine/cpu.h>
79#include <machine/mips_opcode.h>
80#include <machine/asm.h>
81#include <mips/rmi/rmi_mips_exts.h>
82#include <machine/cpuregs.h>
83
84#include <machine/param.h>
85#include <machine/intr_machdep.h>
86#include <machine/clock.h> /* for DELAY */
87#include <machine/cpuregs.h>
88#include <machine/bus.h> /* */
89#include <machine/resource.h>
90
91#include <dev/mii/mii.h>
92#include <dev/mii/miivar.h>
93#include <dev/mii/brgphyreg.h>
94
95#include <mips/rmi/interrupt.h>
96#include <mips/rmi/msgring.h>
97#include <mips/rmi/iomap.h>
98#include <mips/rmi/pic.h>
99#include <mips/rmi/rmi_mips_exts.h>
100#include <mips/rmi/rmi_boot_info.h>
101#include <mips/rmi/board.h>
102
103#include <mips/rmi/dev/xlr/debug.h>
104#include <mips/rmi/dev/xlr/atx_cpld.h>
105#include <mips/rmi/dev/xlr/xgmac_mdio.h>
106#include <mips/rmi/dev/xlr/rge.h>
107
108#include "miibus_if.h"
109
110MODULE_DEPEND(rge, ether, 1, 1, 1);
111MODULE_DEPEND(rge, miibus, 1, 1, 1);
112
113/* #define DEBUG */
114
115#define RGE_TX_THRESHOLD 1024
116#define RGE_TX_Q_SIZE 1024
117
118#ifdef DEBUG
119#undef dbg_msg
120int mac_debug = 1;
121
122#define dbg_msg(fmt, args...) \
123 do {\
124 if (mac_debug) {\
125 printf("[%s@%d|%s]: cpu_%d: " fmt, \
126 __FILE__, __LINE__, __FUNCTION__, xlr_cpu_id(), ##args);\
127 }\
128 } while(0);
129
130#define DUMP_PACKETS
131#else
132#undef dbg_msg
133#define dbg_msg(fmt, args...)
134int mac_debug = 0;
135
136#endif
137
138#define MAC_B2B_IPG 88
139
140/* frame sizes need to be cacheline aligned */
141#define MAX_FRAME_SIZE 1536
142#define MAX_FRAME_SIZE_JUMBO 9216
143
144#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES
145#define MAC_PREPAD 0
146#define BYTE_OFFSET 2
147#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE+BYTE_OFFSET+MAC_PREPAD+MAC_SKB_BACK_PTR_SIZE+SMP_CACHE_BYTES)
148#define MAC_CRC_LEN 4
149#define MAX_NUM_MSGRNG_STN_CC 128
150
151#define MAX_NUM_DESC 1024
152#define MAX_SPILL_SIZE (MAX_NUM_DESC + 128)
153
154#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16
155
156#define MAX_FRIN_SPILL (MAX_SPILL_SIZE << 2)
157#define MAX_FROUT_SPILL (MAX_SPILL_SIZE << 2)
158#define MAX_CLASS_0_SPILL (MAX_SPILL_SIZE << 2)
159#define MAX_CLASS_1_SPILL (MAX_SPILL_SIZE << 2)
160#define MAX_CLASS_2_SPILL (MAX_SPILL_SIZE << 2)
161#define MAX_CLASS_3_SPILL (MAX_SPILL_SIZE << 2)
162
163/*****************************************************************
164 * Phoenix Generic Mac driver
165 *****************************************************************/
166
167extern uint32_t cpu_ltop_map[32];
168
169#ifdef ENABLED_DEBUG
170static int port_counters[4][8] __aligned(XLR_CACHELINE_SIZE);
171
172#define port_inc_counter(port, counter) atomic_add_int(&port_counters[port][(counter)], 1)
173#else
174#define port_inc_counter(port, counter) /* Nothing */
175#endif
176
177int xlr_rge_tx_prepend[MAXCPU];
178int xlr_rge_tx_done[MAXCPU];
179int xlr_rge_get_p2d_failed[MAXCPU];
180int xlr_rge_msg_snd_failed[MAXCPU];
181int xlr_rge_tx_ok_done[MAXCPU];
182int xlr_rge_rx_done[MAXCPU];
183int xlr_rge_repl_done[MAXCPU];
184
185/* #define mac_stats_add(x, val) ({(x) += (val);}) */
186#define mac_stats_add(x, val) xlr_ldaddwu(val, &x)
187
188#define XLR_MAX_CORE 8
189#define RGE_LOCK_INIT(_sc, _name) \
190 mtx_init(&(_sc)->rge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
191#define RGE_LOCK(_sc) mtx_lock(&(_sc)->rge_mtx)
192#define RGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rge_mtx, MA_OWNED)
193#define RGE_UNLOCK(_sc) mtx_unlock(&(_sc)->rge_mtx)
194#define RGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rge_mtx)
195
196#define XLR_MAX_MACS 8
197#define XLR_MAX_TX_FRAGS 14
198#define MAX_P2D_DESC_PER_PORT 512
199struct p2d_tx_desc {
200 uint64_t frag[XLR_MAX_TX_FRAGS + 2];
201};
202
203#define MAX_TX_RING_SIZE (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT * sizeof(struct p2d_tx_desc))
204
205struct rge_softc *dev_mac[XLR_MAX_MACS];
206static int dev_mac_xgs0;
207static int dev_mac_gmac0;
208
209static int gmac_common_init_done;
210
211
212static int rge_probe(device_t);
213static int rge_attach(device_t);
214static int rge_detach(device_t);
215static int rge_suspend(device_t);
216static int rge_resume(device_t);
217static void rge_release_resources(struct rge_softc *);
218static void rge_rx(struct rge_softc *, vm_paddr_t paddr, int);
219static void rge_intr(void *);
220static void rge_start_locked(struct ifnet *, int);
221static void rge_start(struct ifnet *);
222static int rge_ioctl(struct ifnet *, u_long, caddr_t);
223static void rge_init(void *);
224static void rge_stop(struct rge_softc *);
225static int rge_shutdown(device_t);
226static void rge_reset(struct rge_softc *);
227
228static struct mbuf *get_mbuf(void);
229static void free_buf(vm_paddr_t paddr);
230static void *get_buf(void);
231
232static void xlr_mac_get_hwaddr(struct rge_softc *);
233static void xlr_mac_setup_hwaddr(struct driver_data *);
234static void rmi_xlr_mac_set_enable(struct driver_data *priv, int flag);
235static void rmi_xlr_xgmac_init(struct driver_data *priv);
236static void rmi_xlr_gmac_init(struct driver_data *priv);
237static void mac_common_init(void);
238static int rge_mii_write(device_t, int, int, int);
239static int rge_mii_read(device_t, int, int);
240static void rmi_xlr_mac_mii_statchg(device_t);
241static int rmi_xlr_mac_mediachange(struct ifnet *);
242static void rmi_xlr_mac_mediastatus(struct ifnet *, struct ifmediareq *);
243static void xlr_mac_set_rx_mode(struct rge_softc *sc);
244void
245rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
246 int stid, struct msgrng_msg *msg,
247 void *data);
248static void mac_frin_replenish(void *);
249static int rmi_xlr_mac_open(struct rge_softc *);
250static int rmi_xlr_mac_close(struct rge_softc *);
251static int
252mac_xmit(struct mbuf *, struct rge_softc *,
253 struct driver_data *, int, struct p2d_tx_desc *);
254static int rmi_xlr_mac_xmit(struct mbuf *, struct rge_softc *, int, struct p2d_tx_desc *);
255static struct rge_softc_stats *rmi_xlr_mac_get_stats(struct rge_softc *sc);
256static void rmi_xlr_mac_set_multicast_list(struct rge_softc *sc);
257static int rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu);
258static int rmi_xlr_mac_fill_rxfr(struct rge_softc *sc);
259static void rmi_xlr_config_spill_area(struct driver_data *priv);
260static int rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed);
261static int
262rmi_xlr_mac_set_duplex(struct driver_data *s,
263 xlr_mac_duplex_t duplex, xlr_mac_fc_t fc);
264static void serdes_regs_init(struct driver_data *priv);
265static int rmi_xlr_gmac_reset(struct driver_data *priv);
266
267/*Statistics...*/
268static int get_p2d_desc_failed = 0;
269static int msg_snd_failed = 0;
270
271SYSCTL_INT(_hw, OID_AUTO, get_p2d_failed, CTLFLAG_RW,
272 &get_p2d_desc_failed, 0, "p2d desc failed");
273SYSCTL_INT(_hw, OID_AUTO, msg_snd_failed, CTLFLAG_RW,
274 &msg_snd_failed, 0, "msg snd failed");
275
276struct callout xlr_tx_stop_bkp;
277
278static device_method_t rge_methods[] = {
279 /* Device interface */
280 DEVMETHOD(device_probe, rge_probe),
281 DEVMETHOD(device_attach, rge_attach),
282 DEVMETHOD(device_detach, rge_detach),
283 DEVMETHOD(device_shutdown, rge_shutdown),
284 DEVMETHOD(device_suspend, rge_suspend),
285 DEVMETHOD(device_resume, rge_resume),
286
287 /* MII interface */
288 DEVMETHOD(miibus_readreg, rge_mii_read),
289 DEVMETHOD(miibus_statchg, rmi_xlr_mac_mii_statchg),
290 DEVMETHOD(miibus_writereg, rge_mii_write),
291 {0, 0}
292};
293
294static driver_t rge_driver = {
295 "rge",
296 rge_methods,
297 sizeof(struct rge_softc)
298};
299
300static devclass_t rge_devclass;
301
302DRIVER_MODULE(rge, iodi, rge_driver, rge_devclass, 0, 0);
303DRIVER_MODULE(miibus, rge, miibus_driver, miibus_devclass, 0, 0);
304
305#ifndef __STR
306#define __STR(x) #x
307#endif
308#ifndef STR
309#define STR(x) __STR(x)
310#endif
311
312void *xlr_tx_ring_mem;
313
314struct tx_desc_node {
315 struct p2d_tx_desc *ptr;
316 TAILQ_ENTRY(tx_desc_node) list;
317};
318
319#define XLR_MAX_TX_DESC_NODES (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT)
320struct tx_desc_node tx_desc_nodes[XLR_MAX_TX_DESC_NODES];
321static volatile int xlr_tot_avail_p2d[XLR_MAX_CORE];
322static int xlr_total_active_core = 0;
323
324/*
325 * This should contain the list of all free tx frag desc nodes pointing to tx
326 * p2d arrays
327 */
328static
329TAILQ_HEAD(, tx_desc_node) tx_frag_desc[XLR_MAX_CORE] =
330{
331 TAILQ_HEAD_INITIALIZER(tx_frag_desc[0]),
332 TAILQ_HEAD_INITIALIZER(tx_frag_desc[1]),
333 TAILQ_HEAD_INITIALIZER(tx_frag_desc[2]),
334 TAILQ_HEAD_INITIALIZER(tx_frag_desc[3]),
335 TAILQ_HEAD_INITIALIZER(tx_frag_desc[4]),
336 TAILQ_HEAD_INITIALIZER(tx_frag_desc[5]),
337 TAILQ_HEAD_INITIALIZER(tx_frag_desc[6]),
338 TAILQ_HEAD_INITIALIZER(tx_frag_desc[7]),
339};
340
341/* This contains a list of free tx frag node descriptors */
342static
343TAILQ_HEAD(, tx_desc_node) free_tx_frag_desc[XLR_MAX_CORE] =
344{
345 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[0]),
346 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[1]),
347 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[2]),
348 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[3]),
349 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[4]),
350 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[5]),
351 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[6]),
352 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[7]),
353};
354
355static struct mtx tx_desc_lock[XLR_MAX_CORE];
356
357static inline void
358mac_make_desc_rfr(struct msgrng_msg *msg,
359 vm_paddr_t addr)
360{
361 msg->msg0 = (uint64_t) addr & 0xffffffffe0ULL;
362 msg->msg1 = msg->msg2 = msg->msg3 = 0;
363}
364
365#define MAC_TX_DESC_ALIGNMENT (XLR_CACHELINE_SIZE - 1)
366
367static void
368init_p2d_allocation(void)
369{
370 int active_core[8] = {0};
371 int i = 0;
372 uint32_t cpumask;
373 int cpu;
374
375 cpumask = xlr_hw_thread_mask;
376
377 for (i = 0; i < 32; i++) {
378 if (cpumask & (1 << i)) {
379 cpu = i;
380 if (!active_core[cpu / 4]) {
381 active_core[cpu / 4] = 1;
382 xlr_total_active_core++;
383 }
384 }
385 }
386 for (i = 0; i < XLR_MAX_CORE; i++) {
387 if (active_core[i])
388 xlr_tot_avail_p2d[i] = XLR_MAX_TX_DESC_NODES / xlr_total_active_core;
389 }
390 printf("Total Active Core %d\n", xlr_total_active_core);
391}
392
393
/*
 * Allocate the shared TX descriptor ring and distribute the descriptor
 * nodes among the per-core free lists (tx_frag_desc[]).  Also initializes
 * the per-core spin locks protecting those lists.
 */
static void
init_tx_ring(void)
{
	int i;
	int j = 0;
	struct tx_desc_node *start, *node;
	struct p2d_tx_desc *tx_desc;
	vm_paddr_t paddr;
	vm_offset_t unmapped_addr;

	for (i = 0; i < XLR_MAX_CORE; i++)
		mtx_init(&tx_desc_lock[i], "xlr tx_desc", NULL, MTX_SPIN);

	start = &tx_desc_nodes[0];
	/* TODO: try to get this from KSEG0 */
	/*
	 * Physically contiguous, cacheline-aligned, below 256MB so the
	 * ring is reachable through KSEG0.
	 */
	xlr_tx_ring_mem = contigmalloc((MAX_TX_RING_SIZE + XLR_CACHELINE_SIZE),
	    M_DEVBUF, M_NOWAIT | M_ZERO, 0,
	    0x10000000, XLR_CACHELINE_SIZE, 0);

	if (xlr_tx_ring_mem == NULL) {
		panic("TX ring memory allocation failed");
	}
	paddr = vtophys((vm_offset_t)xlr_tx_ring_mem);

	/* Access the ring through the unmapped (KSEG0) window. */
	unmapped_addr = MIPS_PHYS_TO_KSEG0(paddr);


	tx_desc = (struct p2d_tx_desc *)unmapped_addr;

	/*
	 * Hand each node one p2d descriptor and insert it into a core's
	 * list; j advances so nodes are striped evenly across the active
	 * cores (note j is computed from the previous i on each insert).
	 */
	for (i = 0; i < XLR_MAX_TX_DESC_NODES; i++) {
		node = start + i;
		node->ptr = tx_desc;
		tx_desc++;
		TAILQ_INSERT_HEAD(&tx_frag_desc[j], node, list);
		j = (i / (XLR_MAX_TX_DESC_NODES / xlr_total_active_core));
	}
}
431
432static inline struct p2d_tx_desc *
433get_p2d_desc(void)
434{
435 struct tx_desc_node *node;
436 struct p2d_tx_desc *tx_desc = NULL;
437 int cpu = xlr_core_id();
438
439 mtx_lock_spin(&tx_desc_lock[cpu]);
440 node = TAILQ_FIRST(&tx_frag_desc[cpu]);
441 if (node) {
442 xlr_tot_avail_p2d[cpu]--;
443 TAILQ_REMOVE(&tx_frag_desc[cpu], node, list);
444 tx_desc = node->ptr;
445 TAILQ_INSERT_HEAD(&free_tx_frag_desc[cpu], node, list);
446 } else {
447 /* Increment p2d desc fail count */
448 get_p2d_desc_failed++;
449 }
450 mtx_unlock_spin(&tx_desc_lock[cpu]);
451 return tx_desc;
452}
453static void
454free_p2d_desc(struct p2d_tx_desc *tx_desc)
455{
456 struct tx_desc_node *node;
457 int cpu = xlr_core_id();
458
459 mtx_lock_spin(&tx_desc_lock[cpu]);
460 node = TAILQ_FIRST(&free_tx_frag_desc[cpu]);
461 KASSERT((node != NULL), ("Free TX frag node list is empty\n"));
462
463 TAILQ_REMOVE(&free_tx_frag_desc[cpu], node, list);
464 node->ptr = tx_desc;
465 TAILQ_INSERT_HEAD(&tx_frag_desc[cpu], node, list);
466 xlr_tot_avail_p2d[cpu]++;
467 mtx_unlock_spin(&tx_desc_lock[cpu]);
468
469}
470
/*
 * Walk an mbuf chain and fill tx_desc->frag[] with hardware p2d fragment
 * entries (each entry packs a free-back station id, a 14-bit length and a
 * 40-bit physical address).  An mbuf whose data crosses a physical page
 * discontinuity is split into two fragments.  The final entry is the EOP
 * fragment carrying this thread's free-back station id; the two slots past
 * XLR_MAX_TX_FRAGS stash the descriptor and mbuf pointers for
 * release_tx_desc().  On success, p2p_msg->msg0 is built to point the
 * MAC at the descriptor.  Returns 0 on success, 1 on failure (NULL
 * descriptor or too many fragments; the descriptor is freed in the
 * latter case).
 */
static int
build_frag_list(struct mbuf *m_head, struct msgrng_msg *p2p_msg, struct p2d_tx_desc *tx_desc)
{
	struct mbuf *m;
	vm_paddr_t paddr;
	uint64_t p2d_len;
	int nfrag;
	vm_paddr_t p1, p2;
	uint32_t len1, len2;
	vm_offset_t taddr;
	uint64_t fr_stid;

	/* Free-back station id for this cpu thread. */
	fr_stid = (xlr_core_id() << 3) + xlr_thr_id() + 4;

	if (tx_desc == NULL)
		return 1;

	nfrag = 0;
	for (m = m_head; m != NULL; m = m->m_next) {
		/* Keep one slot in reserve for the EOP fragment. */
		if ((nfrag + 1) >= XLR_MAX_TX_FRAGS) {
			free_p2d_desc(tx_desc);
			return 1;
		}
		if (m->m_len != 0) {
			paddr = vtophys(mtod(m, vm_offset_t));
			p1 = paddr + m->m_len;
			p2 = vtophys(((vm_offset_t)m->m_data + m->m_len));
			if (p1 != p2) {
				/*
				 * Data crosses a physical page boundary
				 * discontiguously: emit the first piece up
				 * to the end of the page...
				 */
				len1 = (uint32_t)
				    (PAGE_SIZE - (paddr & PAGE_MASK));
				tx_desc->frag[nfrag] = (127ULL << 54) |
				    ((uint64_t) len1 << 40) | paddr;
				nfrag++;
				taddr = (vm_offset_t)m->m_data + len1;
				p2 = vtophys(taddr);
				len2 = m->m_len - len1;
				if (len2 == 0)
					continue;
				if (nfrag >= XLR_MAX_TX_FRAGS)
					panic("TX frags exceeded");

				/* ...and the remainder as a second piece. */
				tx_desc->frag[nfrag] = (127ULL << 54) |
				    ((uint64_t) len2 << 40) | p2;

				taddr += len2;
				p1 = vtophys(taddr);

				/*
				 * An mbuf may span at most two physically
				 * contiguous runs; anything more is fatal.
				 */
				if ((p2 + len2) != p1) {
					printf("p1 = %p p2 = %p\n", (void *)p1, (void *)p2);
					printf("len1 = %x len2 = %x\n", len1,
					    len2);
					printf("m_data %p\n", m->m_data);
					DELAY(1000000);
					panic("Multiple Mbuf segment discontiguous\n");
				}
			} else {
				tx_desc->frag[nfrag] = (127ULL << 54) |
				    ((uint64_t) m->m_len << 40) | paddr;
			}
			nfrag++;
		}
	}
	/* set eop in the last tx p2d desc */
	tx_desc->frag[nfrag - 1] |= (1ULL << 63);
	paddr = vtophys((vm_offset_t)tx_desc);
	/* EOP fragment: free the descriptor back to this thread's bucket. */
	tx_desc->frag[nfrag] = (1ULL << 63) | (fr_stid << 54) | paddr;
	nfrag++;
	/* Stash pointers for release_tx_desc() past the hw-visible frags. */
	tx_desc->frag[XLR_MAX_TX_FRAGS] = (uint64_t)(intptr_t)tx_desc;
	tx_desc->frag[XLR_MAX_TX_FRAGS + 1] = (uint64_t)(intptr_t)m_head;

	p2d_len = (nfrag * 8);
	p2p_msg->msg0 = (1ULL << 63) | (1ULL << 62) | (127ULL << 54) |
	    (p2d_len << 40) | paddr;

	return 0;
}
547static void
548release_tx_desc(struct msgrng_msg *msg, int rel_buf)
549{
550 struct p2d_tx_desc *tx_desc, *chk_addr;
551 struct mbuf *m;
552
553 tx_desc = (struct p2d_tx_desc *)MIPS_PHYS_TO_KSEG0(msg->msg0);
554 chk_addr = (struct p2d_tx_desc *)(intptr_t)tx_desc->frag[XLR_MAX_TX_FRAGS];
555 if (tx_desc != chk_addr) {
556 printf("Address %p does not match with stored addr %p - we leaked a descriptor\n",
557 tx_desc, chk_addr);
558 return;
559 }
560 if (rel_buf) {
561 m = (struct mbuf *)(intptr_t)tx_desc->frag[XLR_MAX_TX_FRAGS + 1];
562 m_freem(m);
563 }
564 free_p2d_desc(tx_desc);
565}
566
567
568static struct mbuf *
569get_mbuf(void)
570{
571 struct mbuf *m_new = NULL;
572
573 if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
574 return NULL;
575
576 m_new->m_len = MCLBYTES;
577 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
578 return m_new;
579}
580
/*
 * Free an RX buffer previously produced by get_buf().  The metadata
 * cacheline immediately before 'paddr' holds the owning mbuf pointer
 * (word 0) and the 0xf00bad magic (word 1); the words are read with
 * KX-enabled physical loads.  A bad magic indicates corruption and the
 * buffer is deliberately leaked rather than freed.
 */
static void
free_buf(vm_paddr_t paddr)
{
	struct mbuf *m;
	uint64_t mag;
	uint32_t sr;

	sr = xlr_enable_kx();
	m = (struct mbuf *)(intptr_t)xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
	mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
	xlr_restore_kx(sr);
	if (mag != 0xf00bad) {
		printf("Something is wrong kseg:%lx found mag:%lx not 0xf00bad\n",
		    (u_long)paddr, (u_long)mag);
		return;
	}
	if (m != NULL)
		m_freem(m);
}
600
/*
 * Allocate a cacheline-aligned RX buffer backed by an mbuf cluster.
 *
 * The first cacheline of the aligned cluster holds metadata -- md[0] is
 * a back-pointer to the owning mbuf, md[1] the 0xf00bad magic -- and the
 * returned data pointer is advanced past it.  free_buf() reads this
 * metadata back to recover and free the mbuf.
 */
static void *
get_buf(void)
{
	struct mbuf *m_new = NULL;
	uint64_t *md;
#ifdef INVARIANTS
	vm_paddr_t temp1, temp2;
#endif

	m_new = get_mbuf();
	if (m_new == NULL)
		return NULL;

	/*
	 * Advance m_data to the next cacheline boundary (0x1f is
	 * XLR_CACHELINE_SIZE - 1 -- presumably a 32-byte line; confirm).
	 */
	m_adj(m_new, XLR_CACHELINE_SIZE - ((uintptr_t)m_new->m_data & 0x1f));
	md = (uint64_t *)m_new->m_data;
	md[0] = (uintptr_t)m_new;	/* Back Ptr */
	md[1] = 0xf00bad;		/* magic verified by free_buf() */
	m_adj(m_new, XLR_CACHELINE_SIZE);	/* skip the metadata line */

#ifdef INVARIANTS
	/* The DMA engine needs the buffer physically contiguous. */
	temp1 = vtophys((vm_offset_t)m_new->m_data);
	temp2 = vtophys((vm_offset_t)m_new->m_data + 1536);
	if ((temp1 + 1536) != temp2)
		panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
#endif
	return (void *)m_new->m_data;
}
628
629/**********************************************************************
630 **********************************************************************/
/*
 * Enable (flag != 0) or disable (flag == 0) a MAC's data paths: the TX
 * control register (with store threshold), the RX control register and
 * the MAC-level tx/rx enables in MAC_CONFIG_1.
 */
static void
rmi_xlr_mac_set_enable(struct driver_data *priv, int flag)
{
	uint32_t regval;
	int tx_threshold = 1518;

	if (flag) {
		/* Enable TX and program the transmit threshold. */
		regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
		regval |= (1 << O_TX_CONTROL__TxEnable) |
		    (tx_threshold << O_TX_CONTROL__TxThreshold);

		xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
		regval |= 1 << O_RX_CONTROL__RxEnable;
		/* Port 0 may be wired as RGMII; reflect that in RX control. */
		if (priv->mode == XLR_PORT0_RGMII)
			regval |= 1 << O_RX_CONTROL__RGMII;
		xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
		regval |= (O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
		xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
	} else {
		/*
		 * NOTE(review): the disable path clears the threshold bits
		 * using the value 1518 as a mask, which only clears the
		 * bits set in 1518 rather than the whole threshold field.
		 * Matches the enable path in practice, but confirm intent.
		 */
		regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
		regval &= ~((1 << O_TX_CONTROL__TxEnable) |
		    (tx_threshold << O_TX_CONTROL__TxThreshold));

		xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
		regval &= ~(1 << O_RX_CONTROL__RxEnable);
		xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
		regval &= ~(O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
		xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
	}
}
669
670/**********************************************************************
671 **********************************************************************/
/*
 * Hand a free RX buffer at physical address 'addr' back to the MAC's
 * free-in bucket via the message ring.  Spins until the ring accepts the
 * message; under INVARIANTS, panics after 100000 failed credit attempts.
 * 'len' is currently unused.  Always returns 0.
 */
static __inline__ int
xlr_mac_send_fr(struct driver_data *priv,
    vm_paddr_t addr, int len)
{
	struct msgrng_msg msg;
	int stid = priv->rfrbucket;
	int code, ret;
	uint32_t msgrng_flags;
#ifdef INVARIANTS
	int i = 0;
#endif

	mac_make_desc_rfr(&msg, addr);

	/* Send the packet to MAC */
	dbg_msg("mac_%d: Sending free packet %lx to stid %d\n",
	    priv->instance, (u_long)addr, stid);
	if (priv->type == XLR_XGMAC)
		code = MSGRNG_CODE_XGMAC;	/* WHY? */
	else
		code = MSGRNG_CODE_MAC;

	/* Retry until a message-ring credit is available for the bucket. */
	do {
		msgrng_flags = msgrng_access_enable();
		ret = message_send(1, code, stid, &msg);
		msgrng_restore(msgrng_flags);
		KASSERT(i++ < 100000, ("Too many credit fails\n"));
	} while (ret != 0);

	return 0;
}
703
704/**************************************************************/
705
706static void
707xgmac_mdio_setup(volatile unsigned int *_mmio)
708{
709 int i;
710 uint32_t rd_data;
711
712 for (i = 0; i < 4; i++) {
713 rd_data = xmdio_read(_mmio, 1, 0x8000 + i);
714 rd_data = rd_data & 0xffffdfff; /* clear isolate bit */
715 xmdio_write(_mmio, 1, 0x8000 + i, rd_data);
716 }
717}
718
719/**********************************************************************
720 * Init MII interface
721 *
722 * Input parameters:
723 * s - priv structure
724 ********************************************************************* */
725#define PHY_STATUS_RETRIES 25000
726
727static void
728rmi_xlr_mac_mii_init(struct driver_data *priv)
729{
730 xlr_reg_t *mii_mmio = priv->mii_mmio;
731
732 /* use the lowest clock divisor - divisor 28 */
733 xlr_write_reg(mii_mmio, R_MII_MGMT_CONFIG, 0x07);
734}
735
736/**********************************************************************
737 * Read a PHY register.
738 *
739 * Input parameters:
740 * s - priv structure
741 * phyaddr - PHY's address
742 * regidx = index of register to read
743 *
744 * Return value:
 * value read, or 0xffffffff if the management cycle timed out.
746 ********************************************************************* */
747
748static int
749rge_mii_read_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx)
750{
751 int i = 0;
752
753 /* setup the phy reg to be used */
754 xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
755 (phyaddr << 8) | (regidx << 0));
756 /* Issue the read command */
757 xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND,
758 (1 << O_MII_MGMT_COMMAND__rstat));
759
760 /* poll for the read cycle to complete */
761 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
762 if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
763 break;
764 }
765
766 /* clear the read cycle */
767 xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND, 0);
768
769 if (i == PHY_STATUS_RETRIES) {
770 return 0xffffffff;
771 }
772 /* Read the data back */
773 return xlr_read_reg(mii_mmio, R_MII_MGMT_STATUS);
774}
775
776static int
777rge_mii_read(device_t dev, int phyaddr, int regidx)
778{
779 struct rge_softc *sc = device_get_softc(dev);
780
781 return rge_mii_read_internal(sc->priv.mii_mmio, phyaddr, regidx);
782}
783
784/**********************************************************************
785 * Set MII hooks to newly selected media
786 *
787 * Input parameters:
788 * ifp - Interface Pointer
789 *
790 * Return value:
791 * nothing
792 ********************************************************************* */
793static int
794rmi_xlr_mac_mediachange(struct ifnet *ifp)
795{
796 struct rge_softc *sc = ifp->if_softc;
797
798 if (ifp->if_flags & IFF_UP)
799 mii_mediachg(&sc->rge_mii);
800
801 return 0;
802}
803
804/**********************************************************************
805 * Get the current interface media status
806 *
807 * Input parameters:
808 * ifp - Interface Pointer
809 * ifmr - Interface media request ptr
810 *
811 * Return value:
812 * nothing
813 ********************************************************************* */
/*
 * ifmedia status callback: report link validity/activity from the
 * softc's cached link state.
 */
static void
rmi_xlr_mac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;

	/* Check whether this is interface is active or not. */
	ifmr->ifm_status = IFM_AVALID;
	if (sc->link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
	} else {
		/*
		 * NOTE(review): ifm_active is only filled in on the
		 * link-down path, so no media type is reported while the
		 * link is up -- this looks inverted; confirm against
		 * ifmedia consumers before changing.
		 */
		ifmr->ifm_active = IFM_ETHER;
	}
}
827
828/**********************************************************************
829 * Write a value to a PHY register.
830 *
831 * Input parameters:
832 * s - priv structure
833 * phyaddr - PHY to use
834 * regidx - register within the PHY
835 * regval - data to write to register
836 *
837 * Return value:
838 * nothing
839 ********************************************************************* */
840static void
841rge_mii_write_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx, int regval)
842{
843 int i = 0;
844
845 xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
846 (phyaddr << 8) | (regidx << 0));
847
848 /* Write the data which starts the write cycle */
849 xlr_write_reg(mii_mmio, R_MII_MGMT_WRITE_DATA, regval);
850
851 /* poll for the write cycle to complete */
852 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
853 if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
854 break;
855 }
856
857 return;
858}
859
860static int
861rge_mii_write(device_t dev, int phyaddr, int regidx, int regval)
862{
863 struct rge_softc *sc = device_get_softc(dev);
864
865 rge_mii_write_internal(sc->priv.mii_mmio, phyaddr, regidx, regval);
866 return (0);
867}
868
/* miibus statchg method: intentionally a no-op for this hardware. */
static void
rmi_xlr_mac_mii_statchg(struct device *dev)
{
}
873
/*
 * One-time SERDES control-register initialization, done through the MII
 * write helper at pseudo-PHY address 26.  The register values are board
 * magic from RMI; their individual meanings are not documented here.
 */
static void
serdes_regs_init(struct driver_data *priv)
{
	xlr_reg_t *mmio_gpio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GPIO_OFFSET);

	/* Initialize SERDES CONTROL Registers */
	rge_mii_write_internal(priv->serdes_mmio, 26, 0, 0x6DB0);
	rge_mii_write_internal(priv->serdes_mmio, 26, 1, 0xFFFF);
	rge_mii_write_internal(priv->serdes_mmio, 26, 2, 0xB6D0);
	rge_mii_write_internal(priv->serdes_mmio, 26, 3, 0x00FF);
	rge_mii_write_internal(priv->serdes_mmio, 26, 4, 0x0000);
	rge_mii_write_internal(priv->serdes_mmio, 26, 5, 0x0000);
	rge_mii_write_internal(priv->serdes_mmio, 26, 6, 0x0005);
	rge_mii_write_internal(priv->serdes_mmio, 26, 7, 0x0001);
	rge_mii_write_internal(priv->serdes_mmio, 26, 8, 0x0000);
	rge_mii_write_internal(priv->serdes_mmio, 26, 9, 0x0000);
	rge_mii_write_internal(priv->serdes_mmio, 26, 10, 0x0000);

	/*
	 * GPIO setting which affect the serdes - needs figuring out
	 */
	DELAY(100);
	xlr_write_reg(mmio_gpio, 0x20, 0x7e6802);
	xlr_write_reg(mmio_gpio, 0x10, 0x7104);
	DELAY(100);

	/*
	 * This kludge is needed to setup serdes (?) clock correctly on some
	 * XLS boards
	 */
	if ((xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI ||
	    xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XII) &&
	    xlr_boot1_info.board_minor_version == 4) {
		/* use 125 Mhz instead of 156.25Mhz ref clock */
		DELAY(100);
		xlr_write_reg(mmio_gpio, 0x10, 0x7103);
		xlr_write_reg(mmio_gpio, 0x21, 0x7103);
		DELAY(100);
	}

	return;
}
916
917static void
918serdes_autoconfig(struct driver_data *priv)
919{
920 int delay = 100000;
921
922 /* Enable Auto negotiation in the PCS Layer */
923 rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x1000);
924 DELAY(delay);
925 rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x0200);
926 DELAY(delay);
927
928 rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x1000);
929 DELAY(delay);
930 rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x0200);
931 DELAY(delay);
932
933 rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x1000);
934 DELAY(delay);
935 rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x0200);
936 DELAY(delay);
937
938 rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x1000);
939 DELAY(delay);
940 rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x0200);
941 DELAY(delay);
942
943}
944
945/*****************************************************************
946 * Initialize GMAC
947 *****************************************************************/
/*
 * Program the Packet Distribution Engine: build a 64-bit bucket map with
 * two buckets enabled per active core and write it (as low/high 32-bit
 * halves) into all four PDE class registers, so received packets are
 * spread across the active cpus.
 */
static void
rmi_xlr_config_pde(struct driver_data *priv)
{
	int i = 0, cpu = 0, bucket = 0;
	uint64_t bucket_map = 0;

	/* uint32_t desc_pack_ctrl = 0; */
	uint32_t cpumask;

	cpumask = 0x1;
#ifdef SMP
	/*
	 * rge may be called before SMP start in a BOOTP/NFSROOT
	 * setup. we will distribute packets to other cpus only when
	 * the SMP is started.
	 */
	if (smp_started)
		cpumask = xlr_hw_thread_mask;
#endif

	/* Two buckets (bits) per core: core = cpu/4, bucket base = core*8. */
	for (i = 0; i < MAXCPU; i++) {
		if (cpumask & (1 << i)) {
			cpu = i;
			bucket = ((cpu >> 2) << 3);
			bucket_map |= (3ULL << bucket);
		}
	}
	printf("rmi_xlr_config_pde: bucket_map=%jx\n", (uintmax_t)bucket_map);

	/* bucket_map = 0x1; */
	xlr_write_reg(priv->mmio, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
	xlr_write_reg(priv->mmio, R_PDE_CLASS_0 + 1,
	    ((bucket_map >> 32) & 0xffffffff));

	xlr_write_reg(priv->mmio, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
	xlr_write_reg(priv->mmio, R_PDE_CLASS_1 + 1,
	    ((bucket_map >> 32) & 0xffffffff));

	xlr_write_reg(priv->mmio, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
	xlr_write_reg(priv->mmio, R_PDE_CLASS_2 + 1,
	    ((bucket_map >> 32) & 0xffffffff));

	xlr_write_reg(priv->mmio, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
	xlr_write_reg(priv->mmio, R_PDE_CLASS_3 + 1,
	    ((bucket_map >> 32) & 0xffffffff));
}
994
995static void
996rge_smp_update_pde(void *dummy __unused)
997{
998 int i;
999 struct driver_data *priv;
1000 struct rge_softc *sc;
1001
1002 printf("Updating packet distribution for SMP\n");
1003 for (i = 0; i < XLR_MAX_MACS; i++) {
1004 sc = dev_mac[i];
1005 if (!sc)
1006 continue;
1007 priv = &(sc->priv);
1008 rmi_xlr_mac_set_enable(priv, 0);
1009 rmi_xlr_config_pde(priv);
1010 rmi_xlr_mac_set_enable(priv, 1);
1011 }
1012}
1013
1014SYSINIT(rge_smp_update_pde, SI_SUB_SMP, SI_ORDER_ANY, rge_smp_update_pde, NULL);
1015
1016
/*
 * Configure the packet parser.  Classification is left effectively
 * disabled; the single L3 CAM entry extracts the IPv4 (ethertype
 * 0x0800) source/destination addresses and protocol field.
 */
static void
rmi_xlr_config_parser(struct driver_data *priv)
{
	/*
	 * Mark it as no classification The parser extract is gauranteed to
	 * be zero with no classfication
	 */
	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x00);

	/*
	 * NOTE(review): this immediately overwrites the value written
	 * above -- presumably a required write sequence; confirm
	 * against the XLR GMAC documentation.
	 */
	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x01);

	/* configure the parser : L2 Type is configured in the bootloader */
	/* extract IP: src, dest protocol */
	xlr_write_reg(priv->mmio, R_L3CTABLE,
	    (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
	    (0x0800 << 0));
	xlr_write_reg(priv->mmio, R_L3CTABLE + 1,
	    (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));

}
1037
1038static void
1039rmi_xlr_config_classifier(struct driver_data *priv)
1040{
1041 int i = 0;
1042
1043 if (priv->type == XLR_XGMAC) {
1044 /* xgmac translation table doesn't have sane values on reset */
1045 for (i = 0; i < 64; i++)
1046 xlr_write_reg(priv->mmio, R_TRANSLATETABLE + i, 0x0);
1047
1048 /*
1049 * use upper 7 bits of the parser extract to index the
1050 * translate table
1051 */
1052 xlr_write_reg(priv->mmio, R_PARSERCONFIGREG, 0x0);
1053 }
1054}
1055
/*
 * Speed-selection values written to R_INTERFACE_CONTROL for SGMII
 * ports (see rmi_xlr_gmac_config_speed()).
 */
enum {
	SGMII_SPEED_10 = 0x00000000,
	SGMII_SPEED_100 = 0x02000000,
	SGMII_SPEED_1000 = 0x04000000,
};
1061
/*
 * Query the PHY for negotiated speed and link state, program the GMAC
 * to match, and mirror the result into the softc's mii media fields.
 *
 * NOTE(review): PHY register 28 bits [4:3] are decoded as the speed
 * and register 1 bit 2 as link-up; these are vendor-specific status
 * registers -- confirm against the datasheet of the board's PHY.
 */
static void
rmi_xlr_gmac_config_speed(struct driver_data *priv)
{
	int phy_addr = priv->phy_addr;
	xlr_reg_t *mmio = priv->mmio;
	struct rge_softc *sc = priv->sc;

	priv->speed = rge_mii_read_internal(priv->mii_mmio, phy_addr, 28);
	priv->link = rge_mii_read_internal(priv->mii_mmio, phy_addr, 1) & 0x4;
	priv->speed = (priv->speed >> 3) & 0x03;

	if (priv->speed == xlr_mac_speed_10) {
		/* Non-RGMII (SGMII) ports also need interface-control bits. */
		if (priv->mode != XLR_RGMII)
			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_10);
		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
		xlr_write_reg(mmio, R_CORECONTROL, 0x02);
		printf("%s: [10Mbps]\n", device_get_nameunit(sc->rge_dev));
		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
	} else if (priv->speed == xlr_mac_speed_100) {
		if (priv->mode != XLR_RGMII)
			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
		xlr_write_reg(mmio, R_CORECONTROL, 0x01);
		printf("%s: [100Mbps]\n", device_get_nameunit(sc->rge_dev));
		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
	} else {
		if (priv->speed != xlr_mac_speed_1000) {
			/* Unknown PHY encoding: fall back to 100 Mbps. */
			if (priv->mode != XLR_RGMII)
				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
			printf("PHY reported unknown MAC speed, defaulting to 100Mbps\n");
			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
			xlr_write_reg(mmio, R_CORECONTROL, 0x01);
			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		} else {
			if (priv->mode != XLR_RGMII)
				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_1000);
			/* Gigabit uses a different MAC_CONFIG_2 value. */
			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7217);
			xlr_write_reg(mmio, R_CORECONTROL, 0x00);
			printf("%s: [1000Mbps]\n", device_get_nameunit(sc->rge_dev));
			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
		}
	}

	/* Report bare Ethernet media while the link is down. */
	if (!priv->link) {
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER;
		sc->link_up = 0;
	} else {
		sc->link_up = 1;
	}
}
1120
1121/*****************************************************************
1122 * Initialize XGMAC
1123 *****************************************************************/
/*
 * One-time initialization of an XGMAC (10G) port: descriptor packing,
 * PDE / parser / classifier setup, XGMAC and glue registers, taking
 * the XGMII PHY out of reset via the board CPLD, spill areas, and
 * per-instance message-ring bucket sizes and credit counters.  The
 * media is reported as fixed 10G-SR full duplex.
 */
static void
rmi_xlr_xgmac_init(struct driver_data *priv)
{
	int i = 0;
	xlr_reg_t *mmio = priv->mmio;
	int id = priv->instance;
	struct rge_softc *sc = priv->sc;
	volatile unsigned short *cpld;

	/* Board CPLD registers at a fixed uncached (KSEG1) address. */
	cpld = (volatile unsigned short *)0xBD840000;

	xlr_write_reg(priv->mmio, R_DESC_PACK_CTRL,
	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize) | (4 << 20));
	xlr_write_reg(priv->mmio, R_BYTEOFFSET0, BYTE_OFFSET);
	rmi_xlr_config_pde(priv);
	rmi_xlr_config_parser(priv);
	rmi_xlr_config_classifier(priv);

	xlr_write_reg(priv->mmio, R_MSG_TX_THRESHOLD, 1);

	/* configure the XGMAC Registers */
	xlr_write_reg(mmio, R_XGMAC_CONFIG_1, 0x50000026);

	/* configure the XGMAC_GLUE Registers */
	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
	xlr_write_reg(mmio, R_STATCTRL, 0x04);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);

	xlr_write_reg(mmio, R_XGMACPADCALIBRATION, 0x030);
	xlr_write_reg(mmio, R_EGRESSFIFOCARVINGSLOTS, 0x0f);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
	xlr_write_reg(mmio, R_XGMAC_MIIM_CONFIG, 0x3e);

	/*
	 * take XGMII phy out of reset
	 */
	/*
	 * we are pulling everything out of reset because writing a 0 would
	 * reset other devices on the chip
	 */
	cpld[ATX_CPLD_RESET_1] = 0xffff;
	cpld[ATX_CPLD_MISC_CTRL] = 0xffff;
	cpld[ATX_CPLD_RESET_2] = 0xffff;

	xgmac_mdio_setup(mmio);

	rmi_xlr_config_spill_area(priv);

	/* Per-instance bucket sizes and credit counters (XGS0 vs XGS1). */
	if (id == 0) {
		for (i = 0; i < 16; i++) {
			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
			    bucket_sizes.
			    bucket[MSGRNG_STNID_XGS0_TX + i]);
		}

		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0JFR]);
		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0RFR]);

		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
			    cc_table_xgs_0.
			    counters[i >> 3][i & 0x07]);
		}
	} else if (id == 1) {
		for (i = 0; i < 16; i++) {
			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
			    bucket_sizes.
			    bucket[MSGRNG_STNID_XGS1_TX + i]);
		}

		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1JFR]);
		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1RFR]);

		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
			    cc_table_xgs_1.
			    counters[i >> 3][i & 0x07]);
		}
	}
	/* 10G is fixed speed/duplex: report static, active media. */
	sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media.ifm_media |= (IFM_AVALID | IFM_ACTIVE);
	sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media.ifm_cur->ifm_media |= (IFM_AVALID | IFM_ACTIVE);

	/* Flag consumed by the RX path -- presumably triggers initial
	 * free-in descriptor posting; confirm against rge_init/rge_rx. */
	priv->init_frin_desc = 1;
}
1218
1219/*******************************************************
1220 * Initialization gmac
1221 *******************************************************/
/*
 * Soft-reset the GMAC receive path.
 *
 * Sequence: disable MAC RX, disable core RX, poll for RX-halted,
 * assert the soft-reset bit, poll for reset-done, then clear the
 * reset bit.  Each poll loop runs at most `maxloops` iterations with
 * a 1 ms delay between reads.
 *
 * Returns 0 on success, -1 if either poll times out.
 */
static int
rmi_xlr_gmac_reset(struct driver_data *priv)
{
	volatile uint32_t val;
	xlr_reg_t *mmio = priv->mmio;
	int i, maxloops = 100;

	/* Disable MAC RX */
	val = xlr_read_reg(mmio, R_MAC_CONFIG_1);
	val &= ~0x4;
	xlr_write_reg(mmio, R_MAC_CONFIG_1, val);

	/* Disable Core RX */
	val = xlr_read_reg(mmio, R_RX_CONTROL);
	val &= ~0x1;
	xlr_write_reg(mmio, R_RX_CONTROL, val);

	/* wait for rx to halt */
	for (i = 0; i < maxloops; i++) {
		val = xlr_read_reg(mmio, R_RX_CONTROL);
		if (val & 0x2)
			break;
		DELAY(1000);
	}
	if (i == maxloops)
		return -1;

	/* Issue a soft reset */
	val = xlr_read_reg(mmio, R_RX_CONTROL);
	val |= 0x4;
	xlr_write_reg(mmio, R_RX_CONTROL, val);

	/* wait for reset to complete */
	for (i = 0; i < maxloops; i++) {
		val = xlr_read_reg(mmio, R_RX_CONTROL);
		if (val & 0x8)
			break;
		DELAY(1000);
	}
	if (i == maxloops)
		return -1;

	/* Clear the soft reset bit */
	val = xlr_read_reg(mmio, R_RX_CONTROL);
	val &= ~0x4;
	xlr_write_reg(mmio, R_RX_CONTROL, val);
	return 0;
}
1270
/*
 * Full initialization of one GMAC port: spill areas, descriptor
 * packing, PDE / parser / classifier, MII (and serdes for SGMII)
 * bring-up, speed configuration, DMA / statistics / interrupt
 * registers, and message-ring bucket sizes plus credit counters for
 * this port's GMAC block.
 */
static void
rmi_xlr_gmac_init(struct driver_data *priv)
{
	int i = 0;
	xlr_reg_t *mmio = priv->mmio;
	int id = priv->instance;
	struct stn_cc *gmac_cc_config;
	uint32_t value = 0;
	int blk = id / 4, port = id % 4;	/* 4 GMAC ports per block */

	rmi_xlr_mac_set_enable(priv, 0);

	rmi_xlr_config_spill_area(priv);

	xlr_write_reg(mmio, R_DESC_PACK_CTRL,
	    (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset) |
	    (1 << O_DESC_PACK_CTRL__MaxEntry) |
	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));

	rmi_xlr_config_pde(priv);
	rmi_xlr_config_parser(priv);
	rmi_xlr_config_classifier(priv);

	xlr_write_reg(mmio, R_MSG_TX_THRESHOLD, 3);
	xlr_write_reg(mmio, R_MAC_CONFIG_1, 0x35);
	xlr_write_reg(mmio, R_RX_CONTROL, (0x7 << 6));

	if (priv->mode == XLR_PORT0_RGMII) {
		printf("Port 0 set in RGMII mode\n");
		value = xlr_read_reg(mmio, R_RX_CONTROL);
		value |= 1 << O_RX_CONTROL__RGMII;
		xlr_write_reg(mmio, R_RX_CONTROL, value);
	}
	rmi_xlr_mac_mii_init(priv);


#if 0
	priv->advertising = ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half |
	    ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half |
	    ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
	    ADVERTISED_MII;
#endif

	/*
	 * Enable all MDIO interrupts in the phy RX_ER bit seems to be get
	 * set about every 1 sec in GigE mode, ignore it for now...
	 */
	rge_mii_write_internal(priv->mii_mmio, priv->phy_addr, 25, 0xfffffffe);

	if (priv->mode != XLR_RGMII) {
		serdes_regs_init(priv);
		serdes_autoconfig(priv);
	}
	rmi_xlr_gmac_config_speed(priv);

	/* Set back-to-back inter-packet gap, preserve other IPG/IFG bits. */
	value = xlr_read_reg(mmio, R_IPG_IFG);
	xlr_write_reg(mmio, R_IPG_IFG, ((value & ~0x7f) | MAC_B2B_IPG));
	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
	xlr_write_reg(mmio, R_STATCTRL, 0x04);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
	xlr_write_reg(mmio, R_INTMASK, 0);
	xlr_write_reg(mmio, R_FREEQCARVE, 0);

	/* Bucket sizes for this port's TX bucket and the free-in buckets. */
	xlr_write_reg(mmio, R_GMAC_TX0_BUCKET_SIZE + port,
	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
	xlr_write_reg(mmio, R_GMAC_JFR0_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
	xlr_write_reg(mmio, R_GMAC_RFR0_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
	xlr_write_reg(mmio, R_GMAC_JFR1_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
	xlr_write_reg(mmio, R_GMAC_RFR1_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);

	dbg_msg("Programming credit counter %d : %d -> %d\n", blk, R_GMAC_TX0_BUCKET_SIZE + port,
	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);

	gmac_cc_config = xlr_board_info.gmac_block[blk].credit_config;
	for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
		xlr_write_reg(mmio, R_CC_CPU0_0 + i,
		    gmac_cc_config->counters[i >> 3][i & 0x07]);
		dbg_msg("%d: %d -> %d\n", priv->instance,
		    R_CC_CPU0_0 + i, gmac_cc_config->counters[i >> 3][i & 0x07]);
	}
	/* Flag consumed by the RX path -- presumably triggers initial
	 * free-in descriptor posting; confirm against rge_init/rge_rx. */
	priv->init_frin_desc = 1;
}
1360
1361/**********************************************************************
1362 * Set promiscuous mode
1363 **********************************************************************/
1364static void
1365xlr_mac_set_rx_mode(struct rge_softc *sc)
1366{
1367 struct driver_data *priv = &(sc->priv);
1368 uint32_t regval;
1369
1370 regval = xlr_read_reg(priv->mmio, R_MAC_FILTER_CONFIG);
1371
1372 if (sc->flags & IFF_PROMISC) {
1373 regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1374 (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1375 (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1376 (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
1377 } else {
1378 regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1379 (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
1380 }
1381
1382 xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG, regval);
1383}
1384
1385/**********************************************************************
1386 * Configure LAN speed for the specified MAC.
1387 ********************************************************************* */
static int
rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed)
{
	/* No-op stub: speed is programmed in rmi_xlr_gmac_config_speed(). */
	return 0;
}
1393
1394/**********************************************************************
1395 * Set Ethernet duplex and flow control options for this MAC
1396 ********************************************************************* */
static int
rmi_xlr_mac_set_duplex(struct driver_data *s,
    xlr_mac_duplex_t duplex, xlr_mac_fc_t fc)
{
	/* No-op stub: duplex/flow control are not runtime-configurable. */
	return 0;
}
1403
1404/*****************************************************************
1405 * Kernel Net Stack <-> MAC Driver Interface
1406 *****************************************************************/
1407/**********************************************************************
1408 **********************************************************************/
1409#define MAC_TX_FAIL 2
1410#define MAC_TX_PASS 0
1411#define MAC_TX_RETRY 1
1412
1413int xlr_dev_queue_xmit_hack = 0;
1414
1415static int
1416mac_xmit(struct mbuf *m, struct rge_softc *sc,
1417 struct driver_data *priv, int len, struct p2d_tx_desc *tx_desc)
1418{
1419 struct msgrng_msg msg = {0,0,0,0};
1420 int stid = priv->txbucket;
1421 uint32_t tx_cycles = 0;
1422 uint32_t mflags;
1423 int vcpu = xlr_cpu_id();
1424 int rv;
1425
1426 tx_cycles = mips_rd_count();
1427
1428 if (build_frag_list(m, &msg, tx_desc) != 0)
1429 return MAC_TX_FAIL;
1430
1431 else {
1432 mflags = msgrng_access_enable();
1433 if ((rv = message_send(1, MSGRNG_CODE_MAC, stid, &msg)) != 0) {
1434 msg_snd_failed++;
1435 msgrng_restore(mflags);
1436 release_tx_desc(&msg, 0);
1437 xlr_rge_msg_snd_failed[vcpu]++;
1438 dbg_msg("Failed packet to cpu %d, rv = %d, stid %d, msg0=%jx\n",
1439 vcpu, rv, stid, (uintmax_t)msg.msg0);
1440 return MAC_TX_FAIL;
1441 }
1442 msgrng_restore(mflags);
1443 port_inc_counter(priv->instance, PORT_TX);
1444 }
1445
1446 /* Send the packet to MAC */
1447 dbg_msg("Sent tx packet to stid %d, msg0=%jx, msg1=%jx \n", stid,
1448 (uintmax_t)msg.msg0, (uintmax_t)msg.msg1);
1449#ifdef DUMP_PACKETS
1450 {
1451 int i = 0;
1452 unsigned char *buf = (char *)m->m_data;
1453
1454 printf("Tx Packet: length=%d\n", len);
1455 for (i = 0; i < 64; i++) {
1456 if (i && (i % 16) == 0)
1457 printf("\n");
1458 printf("%02x ", buf[i]);
1459 }
1460 printf("\n");
1461 }
1462#endif
1463 xlr_inc_counter(NETIF_TX);
1464 return MAC_TX_PASS;
1465}
1466
1467static int
1468rmi_xlr_mac_xmit(struct mbuf *m, struct rge_softc *sc, int len, struct p2d_tx_desc *tx_desc)
1469{
1470 struct driver_data *priv = &(sc->priv);
1471 int ret = -ENOSPC;
1472
1473 dbg_msg("IN\n");
1474
1475 xlr_inc_counter(NETIF_STACK_TX);
1476
1477retry:
1478 ret = mac_xmit(m, sc, priv, len, tx_desc);
1479
1480 if (ret == MAC_TX_RETRY)
1481 goto retry;
1482
1483 dbg_msg("OUT, ret = %d\n", ret);
1484 if (ret == MAC_TX_FAIL) {
1485 /* FULL */
1486 dbg_msg("Msg Ring Full. Stopping upper layer Q\n");
1487 port_inc_counter(priv->instance, PORT_STOPQ);
1488 }
1489 return ret;
1490}
1491
/*
 * Replenish free-in (RX buffer) descriptors for every MAC.
 *
 * Drains this core's frin_to_be_sent counters: for each MAC with
 * outstanding requests, allocate one RX buffer and post its physical
 * address to the MAC's free-in bucket, looping until all MACs report
 * nothing left to send (or a send fails).
 */
static void
mac_frin_replenish(void *args /* ignored */ )
{
	int cpu = xlr_core_id();
	int done = 0;
	int i = 0;

	xlr_inc_counter(REPLENISH_ENTER);
	/*
	 * xlr_set_counter(REPLENISH_ENTER_COUNT,
	 * atomic_read(frin_to_be_sent));
	 */
	xlr_set_counter(REPLENISH_CPU, PCPU_GET(cpuid));

	for (;;) {

		done = 0;

		for (i = 0; i < XLR_MAX_MACS; i++) {
			/* int offset = 0; */
			void *m;
			uint32_t cycles;
			struct rge_softc *sc;
			struct driver_data *priv;
			int frin_to_be_sent;

			sc = dev_mac[i];
			if (!sc)
				goto skip;

			priv = &(sc->priv);
			frin_to_be_sent = priv->frin_to_be_sent[cpu];

			/* if (atomic_read(frin_to_be_sent) < 0) */
			if (frin_to_be_sent < 0) {
				panic("BUG?: [%s]: gmac_%d illegal value for frin_to_be_sent=%d\n",
				    __FUNCTION__, i,
				    frin_to_be_sent);
			}
			/* if (!atomic_read(frin_to_be_sent)) */
			if (!frin_to_be_sent)
				goto skip;

			cycles = mips_rd_count();
			{
				m = get_buf();
				if (!m) {
					device_printf(sc->rge_dev, "No buffer\n");
					goto skip;
				}
			}
			xlr_inc_counter(REPLENISH_FRIN);
			/* Post the buffer's physical address; free it on failure. */
			if (xlr_mac_send_fr(priv, vtophys(m), MAX_FRAME_SIZE)) {
				free_buf(vtophys(m));
				printf("[%s]: rx free message_send failed!\n", __FUNCTION__);
				break;
			}
			xlr_set_counter(REPLENISH_CYCLES,
			    (read_c0_count() - cycles));
			atomic_subtract_int((&priv->frin_to_be_sent[cpu]), 1);

			continue;
	skip:
			done++;
		}
		/* Every MAC reported nothing to do: replenish complete. */
		if (done == XLR_MAX_MACS)
			break;
	}
}
1561
1562static volatile uint32_t g_tx_frm_tx_ok=0;
1563
1564static void
1565rge_tx_bkp_func(void *arg, int npending)
1566{
1567 int i = 0;
1568
1569 for (i = 0; i < xlr_board_info.gmacports; i++) {
1570 if (!dev_mac[i] || !dev_mac[i]->active)
1571 continue;
1572 rge_start_locked(dev_mac[i]->rge_ifp, RGE_TX_THRESHOLD);
1573 }
1574 atomic_subtract_int(&g_tx_frm_tx_ok, 1);
1575}
1576
1577/* This function is called from an interrupt handler */
/*
 * Message-ring handler for MAC stations (runs in interrupt context).
 *
 * Decodes the message as either a TX-done free-back (length == 0):
 * release the tx descriptor, clear OACTIVE and kick the TX backup
 * routine -- or an RX packet: account it, pass it to rge_rx() and
 * trigger free-in replenishment once this core's backlog exceeds
 * MAC_FRIN_TO_BE_SENT_THRESHOLD.
 */
void
rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
    int stid, struct msgrng_msg *msg,
    void *data /* ignored */ )
{
	uint64_t phys_addr = 0;
	unsigned long addr = 0;
	uint32_t length = 0;
	int ctrl = 0, port = 0;
	struct rge_softc *sc = NULL;
	struct driver_data *priv = 0;
	struct ifnet *ifp;
	int vcpu = xlr_cpu_id();
	int cpu = xlr_core_id();

	dbg_msg("mac: bucket=%d, size=%d, code=%d, stid=%d, msg0=%jx msg1=%jx\n",
	    bucket, size, code, stid, (uintmax_t)msg->msg0, (uintmax_t)msg->msg1);

	/* msg0 layout: bits [39:5] buffer address, [53:40] length. */
	phys_addr = (uint64_t) (msg->msg0 & 0xffffffffe0ULL);
	length = (msg->msg0 >> 40) & 0x3fff;
	if (length == 0) {
		/* Zero length marks a returned (freed) TX descriptor. */
		ctrl = CTRL_REG_FREE;
		port = (msg->msg0 >> 54) & 0x0f;
		addr = 0;
	} else {
		ctrl = CTRL_SNGL;
		/* Strip the 2-byte alignment offset and the Ethernet CRC. */
		length = length - BYTE_OFFSET - MAC_CRC_LEN;
		port = msg->msg0 & 0x0f;
		addr = 0;
	}

	/* Map the station id / port number back to the owning softc. */
	if (xlr_board_info.is_xls) {
		if (stid == MSGRNG_STNID_GMAC1)
			port += 4;
		sc = dev_mac[dev_mac_gmac0 + port];
	} else {
		if (stid == MSGRNG_STNID_XGS0FR)
			sc = dev_mac[dev_mac_xgs0];
		else if (stid == MSGRNG_STNID_XGS1FR)
			sc = dev_mac[dev_mac_xgs0 + 1];
		else
			sc = dev_mac[dev_mac_gmac0 + port];
	}
	if (sc == NULL)
		return;
	priv = &(sc->priv);

	dbg_msg("msg0 = %jx, stid = %d, port = %d, addr=%lx, length=%d, ctrl=%d\n",
	    (uintmax_t)msg->msg0, stid, port, addr, length, ctrl);

	if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
		/* TX completion. */
		xlr_rge_tx_ok_done[vcpu]++;
		release_tx_desc(msg, 1);
		ifp = sc->rge_ifp;
		if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		/* g_tx_frm_tx_ok gates at most one concurrent backup run. */
		if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
			rge_tx_bkp_func(NULL, 0);
		xlr_set_counter(NETIF_TX_COMPLETE_CYCLES,
		    (read_c0_count() - msgrng_msg_cycles));
	} else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
		/* Rx Packet */
		/* struct mbuf *m = 0; */
		/* int logical_cpu = 0; */

		dbg_msg("Received packet, port = %d\n", port);
		/*
		 * if num frins to be sent exceeds threshold, wake up the
		 * helper thread
		 */
		atomic_add_int(&(priv->frin_to_be_sent[cpu]), 1);
		if ((priv->frin_to_be_sent[cpu]) > MAC_FRIN_TO_BE_SENT_THRESHOLD) {
			mac_frin_replenish(NULL);
		}
		dbg_msg("gmac_%d: rx packet: phys_addr = %jx, length = %x\n",
		    priv->instance, (uintmax_t)phys_addr, length);
		mac_stats_add(priv->stats.rx_packets, 1);
		mac_stats_add(priv->stats.rx_bytes, length);
		xlr_inc_counter(NETIF_RX);
		xlr_set_counter(NETIF_RX_CYCLES,
		    (read_c0_count() - msgrng_msg_cycles));
		rge_rx(sc, phys_addr, length);
		xlr_rge_rx_done[vcpu]++;
	} else {
		printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
	}

}
1667
1668/**********************************************************************
1669 **********************************************************************/
1670static int
1671rge_probe(dev)
1672 device_t dev;
1673{
1674 device_set_desc(dev, "RMI Gigabit Ethernet");
1675
1676 /* Always return 0 */
1677 return 0;
1678}
1679
1680volatile unsigned long xlr_debug_enabled;
1681struct callout rge_dbg_count;
1682static void
1683xlr_debug_count(void *addr)
1684{
1685 struct driver_data *priv = &dev_mac[0]->priv;
1686
1687 /* uint32_t crdt; */
1688 if (xlr_debug_enabled) {
1689 printf("\nAvailRxIn %#x\n", xlr_read_reg(priv->mmio, 0x23e));
1690 }
1691 callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1692}
1693
1694
1695static void
1696xlr_tx_q_wakeup(void *addr)
1697{
1698 int i = 0;
1699 int j = 0;
1700
1701 for (i = 0; i < xlr_board_info.gmacports; i++) {
1702 if (!dev_mac[i] || !dev_mac[i]->active)
1703 continue;
1704 if ((dev_mac[i]->rge_ifp->if_drv_flags) & IFF_DRV_OACTIVE) {
1705 for (j = 0; j < XLR_MAX_CORE; j++) {
1706 if (xlr_tot_avail_p2d[j]) {
1707 dev_mac[i]->rge_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1708 break;
1709 }
1710 }
1711 }
1712 }
1713 if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
1714 rge_tx_bkp_func(NULL, 0);
1715 callout_reset(&xlr_tx_stop_bkp, 5 * hz, xlr_tx_q_wakeup, NULL);
1716}
1717
1718static int
1719rge_attach(device_t dev)
1720{
1721 struct ifnet *ifp;
1722 struct rge_softc *sc;
1723 struct driver_data *priv = 0;
1724 int ret = 0;
1725 struct xlr_gmac_block_t *gmac_conf = device_get_ivars(dev);
1726
1727 sc = device_get_softc(dev);
1728 sc->rge_dev = dev;
1729
1730 /* Initialize mac's */
1731 sc->unit = device_get_unit(dev);
1732
1733 if (sc->unit > XLR_MAX_MACS) {
1734 ret = ENXIO;
1735 goto out;
1736 }
1737 RGE_LOCK_INIT(sc, device_get_nameunit(dev));
1738
1739 priv = &(sc->priv);
1740 priv->sc = sc;
1741
1742 sc->flags = 0; /* TODO : fix me up later */
1743
1744 priv->id = sc->unit;
1745 if (gmac_conf->type == XLR_GMAC) {
1746 priv->instance = priv->id;
1747 priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr +
1748 0x1000 * (sc->unit % 4));
1749 if ((ret = rmi_xlr_gmac_reset(priv)) == -1)
1750 goto out;
1751 } else if (gmac_conf->type == XLR_XGMAC) {
1752 priv->instance = priv->id - xlr_board_info.gmacports;
1753 priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1754 }
1755 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_VI ||
1756 (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI &&
1757 priv->instance >=4)) {
1758 dbg_msg("Arizona board - offset 4 \n");
1759 priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_4_OFFSET);
1760 } else
1761 priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1762
1763 priv->pcs_mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1764 priv->serdes_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1765
1766 sc->base_addr = (unsigned long)priv->mmio;
1767 sc->mem_end = (unsigned long)priv->mmio + XLR_IO_SIZE - 1;
1768
1769 sc->xmit = rge_start;
1770 sc->stop = rge_stop;
1771 sc->get_stats = rmi_xlr_mac_get_stats;
1772 sc->ioctl = rge_ioctl;
1773
1774 /* Initialize the device specific driver data */
1775 mtx_init(&priv->lock, "rge", NULL, MTX_SPIN);
1776
1777 priv->type = gmac_conf->type;
1778
1779 priv->mode = gmac_conf->mode;
1780 if (xlr_board_info.is_xls == 0) {
1781 /* TODO - check II and IIB boards */
1782 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_II &&
1783 xlr_boot1_info.board_minor_version != 1)
1784 priv->phy_addr = priv->instance - 2;
1785 else
1786 priv->phy_addr = priv->instance;
1787 priv->mode = XLR_RGMII;
1788 } else {
1789 if (gmac_conf->mode == XLR_PORT0_RGMII &&
1790 priv->instance == 0) {
1791 priv->mode = XLR_PORT0_RGMII;
1792 priv->phy_addr = 0;
1793 } else {
1794 priv->mode = XLR_SGMII;
1795 /* Board 11 has SGMII daughter cards with the XLS chips, in this case
1796 the phy number is 0-3 for both GMAC blocks */
1797 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI)
1798 priv->phy_addr = priv->instance % 4 + 16;
1799 else
1800 priv->phy_addr = priv->instance + 16;
1801 }
1802 }
1803
1804 priv->txbucket = gmac_conf->station_txbase + priv->instance % 4;
1805 priv->rfrbucket = gmac_conf->station_rfr;
1806 priv->spill_configured = 0;
1807
1808 dbg_msg("priv->mmio=%p\n", priv->mmio);
1809
1810 /* Set up ifnet structure */
1811 ifp = sc->rge_ifp = if_alloc(IFT_ETHER);
1812 if (ifp == NULL) {
1813 device_printf(sc->rge_dev, "failed to if_alloc()\n");
1814 rge_release_resources(sc);
1815 ret = ENXIO;
1816 RGE_LOCK_DESTROY(sc);
1817 goto out;
1818 }
1819 ifp->if_softc = sc;
1820 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1821 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1822 ifp->if_ioctl = rge_ioctl;
1823 ifp->if_start = rge_start;
1824 ifp->if_init = rge_init;
1825 ifp->if_mtu = ETHERMTU;
1826 ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1827 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1828 IFQ_SET_READY(&ifp->if_snd);
1829 sc->active = 1;
1830 ifp->if_hwassist = 0;
1831 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;
1832 ifp->if_capenable = ifp->if_capabilities;
1833
1834 /* Initialize the rge_softc */
1835 sc->irq = gmac_conf->baseirq + priv->instance % 4;
1836
1837 /* Set the IRQ into the rid field */
1838 /*
1839 * note this is a hack to pass the irq to the iodi interrupt setup
1840 * routines
1841 */
1842 sc->rge_irq.__r_i = (struct resource_i *)(intptr_t)sc->irq;
1843
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_device_polling.h"
37#endif
38
39#include <sys/types.h>
40#include <sys/endian.h>
41#include <sys/systm.h>
42#include <sys/sockio.h>
43#include <sys/param.h>
44#include <sys/lock.h>
45#include <sys/mutex.h>
46#include <sys/proc.h>
47#include <sys/limits.h>
48#include <sys/bus.h>
49#include <sys/mbuf.h>
50#include <sys/malloc.h>
51#include <sys/kernel.h>
52#include <sys/module.h>
53#include <sys/socket.h>
54#define __RMAN_RESOURCE_VISIBLE
55#include <sys/rman.h>
56#include <sys/taskqueue.h>
57#include <sys/smp.h>
58#include <sys/sysctl.h>
59
60#include <net/if.h>
61#include <net/if_arp.h>
62#include <net/ethernet.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65
66#include <net/bpf.h>
67#include <net/if_types.h>
68#include <net/if_vlan_var.h>
69
70#include <netinet/in_systm.h>
71#include <netinet/in.h>
72#include <netinet/ip.h>
73
74#include <vm/vm.h>
75#include <vm/pmap.h>
76
77#include <machine/reg.h>
78#include <machine/cpu.h>
79#include <machine/mips_opcode.h>
80#include <machine/asm.h>
81#include <mips/rmi/rmi_mips_exts.h>
82#include <machine/cpuregs.h>
83
84#include <machine/param.h>
85#include <machine/intr_machdep.h>
86#include <machine/clock.h> /* for DELAY */
87#include <machine/cpuregs.h>
88#include <machine/bus.h> /* */
89#include <machine/resource.h>
90
91#include <dev/mii/mii.h>
92#include <dev/mii/miivar.h>
93#include <dev/mii/brgphyreg.h>
94
95#include <mips/rmi/interrupt.h>
96#include <mips/rmi/msgring.h>
97#include <mips/rmi/iomap.h>
98#include <mips/rmi/pic.h>
99#include <mips/rmi/rmi_mips_exts.h>
100#include <mips/rmi/rmi_boot_info.h>
101#include <mips/rmi/board.h>
102
103#include <mips/rmi/dev/xlr/debug.h>
104#include <mips/rmi/dev/xlr/atx_cpld.h>
105#include <mips/rmi/dev/xlr/xgmac_mdio.h>
106#include <mips/rmi/dev/xlr/rge.h>
107
108#include "miibus_if.h"
109
110MODULE_DEPEND(rge, ether, 1, 1, 1);
111MODULE_DEPEND(rge, miibus, 1, 1, 1);
112
113/* #define DEBUG */
114
115#define RGE_TX_THRESHOLD 1024
116#define RGE_TX_Q_SIZE 1024
117
118#ifdef DEBUG
119#undef dbg_msg
120int mac_debug = 1;
121
/*
 * Debug printf gated on mac_debug.  The do/while(0) wrapper must NOT
 * end in a semicolon, otherwise `if (x) dbg_msg(...); else ...` fails
 * to compile -- the trailing `;` the original carried defeated the
 * whole point of the idiom.
 */
#define dbg_msg(fmt, args...) \
	do {\
		if (mac_debug) {\
			printf("[%s@%d|%s]: cpu_%d: " fmt, \
				__FILE__, __LINE__, __FUNCTION__, xlr_cpu_id(), ##args);\
		}\
	} while(0)
129
130#define DUMP_PACKETS
131#else
132#undef dbg_msg
133#define dbg_msg(fmt, args...)
134int mac_debug = 0;
135
136#endif
137
138#define MAC_B2B_IPG 88
139
140/* frame sizes need to be cacheline aligned */
141#define MAX_FRAME_SIZE 1536
142#define MAX_FRAME_SIZE_JUMBO 9216
143
144#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES
145#define MAC_PREPAD 0
146#define BYTE_OFFSET 2
147#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE+BYTE_OFFSET+MAC_PREPAD+MAC_SKB_BACK_PTR_SIZE+SMP_CACHE_BYTES)
148#define MAC_CRC_LEN 4
149#define MAX_NUM_MSGRNG_STN_CC 128
150
151#define MAX_NUM_DESC 1024
152#define MAX_SPILL_SIZE (MAX_NUM_DESC + 128)
153
154#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16
155
156#define MAX_FRIN_SPILL (MAX_SPILL_SIZE << 2)
157#define MAX_FROUT_SPILL (MAX_SPILL_SIZE << 2)
158#define MAX_CLASS_0_SPILL (MAX_SPILL_SIZE << 2)
159#define MAX_CLASS_1_SPILL (MAX_SPILL_SIZE << 2)
160#define MAX_CLASS_2_SPILL (MAX_SPILL_SIZE << 2)
161#define MAX_CLASS_3_SPILL (MAX_SPILL_SIZE << 2)
162
163/*****************************************************************
164 * Phoenix Generic Mac driver
165 *****************************************************************/
166
167extern uint32_t cpu_ltop_map[32];
168
169#ifdef ENABLED_DEBUG
170static int port_counters[4][8] __aligned(XLR_CACHELINE_SIZE);
171
172#define port_inc_counter(port, counter) atomic_add_int(&port_counters[port][(counter)], 1)
173#else
174#define port_inc_counter(port, counter) /* Nothing */
175#endif
176
177int xlr_rge_tx_prepend[MAXCPU];
178int xlr_rge_tx_done[MAXCPU];
179int xlr_rge_get_p2d_failed[MAXCPU];
180int xlr_rge_msg_snd_failed[MAXCPU];
181int xlr_rge_tx_ok_done[MAXCPU];
182int xlr_rge_rx_done[MAXCPU];
183int xlr_rge_repl_done[MAXCPU];
184
185/* #define mac_stats_add(x, val) ({(x) += (val);}) */
186#define mac_stats_add(x, val) xlr_ldaddwu(val, &x)
187
188#define XLR_MAX_CORE 8
189#define RGE_LOCK_INIT(_sc, _name) \
190 mtx_init(&(_sc)->rge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
191#define RGE_LOCK(_sc) mtx_lock(&(_sc)->rge_mtx)
192#define RGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rge_mtx, MA_OWNED)
193#define RGE_UNLOCK(_sc) mtx_unlock(&(_sc)->rge_mtx)
194#define RGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rge_mtx)
195
196#define XLR_MAX_MACS 8
197#define XLR_MAX_TX_FRAGS 14
198#define MAX_P2D_DESC_PER_PORT 512
/*
 * Packet-to-descriptor TX descriptor: XLR_MAX_TX_FRAGS fragment
 * entries plus two extra 64-bit words -- presumably link/terminator
 * words; confirm against build_frag_list().
 */
struct p2d_tx_desc {
	uint64_t frag[XLR_MAX_TX_FRAGS + 2];
};
202
203#define MAX_TX_RING_SIZE (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT * sizeof(struct p2d_tx_desc))
204
205struct rge_softc *dev_mac[XLR_MAX_MACS];
206static int dev_mac_xgs0;
207static int dev_mac_gmac0;
208
209static int gmac_common_init_done;
210
211
212static int rge_probe(device_t);
213static int rge_attach(device_t);
214static int rge_detach(device_t);
215static int rge_suspend(device_t);
216static int rge_resume(device_t);
217static void rge_release_resources(struct rge_softc *);
218static void rge_rx(struct rge_softc *, vm_paddr_t paddr, int);
219static void rge_intr(void *);
220static void rge_start_locked(struct ifnet *, int);
221static void rge_start(struct ifnet *);
222static int rge_ioctl(struct ifnet *, u_long, caddr_t);
223static void rge_init(void *);
224static void rge_stop(struct rge_softc *);
225static int rge_shutdown(device_t);
226static void rge_reset(struct rge_softc *);
227
228static struct mbuf *get_mbuf(void);
229static void free_buf(vm_paddr_t paddr);
230static void *get_buf(void);
231
232static void xlr_mac_get_hwaddr(struct rge_softc *);
233static void xlr_mac_setup_hwaddr(struct driver_data *);
234static void rmi_xlr_mac_set_enable(struct driver_data *priv, int flag);
235static void rmi_xlr_xgmac_init(struct driver_data *priv);
236static void rmi_xlr_gmac_init(struct driver_data *priv);
237static void mac_common_init(void);
238static int rge_mii_write(device_t, int, int, int);
239static int rge_mii_read(device_t, int, int);
240static void rmi_xlr_mac_mii_statchg(device_t);
241static int rmi_xlr_mac_mediachange(struct ifnet *);
242static void rmi_xlr_mac_mediastatus(struct ifnet *, struct ifmediareq *);
243static void xlr_mac_set_rx_mode(struct rge_softc *sc);
244void
245rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
246 int stid, struct msgrng_msg *msg,
247 void *data);
248static void mac_frin_replenish(void *);
249static int rmi_xlr_mac_open(struct rge_softc *);
250static int rmi_xlr_mac_close(struct rge_softc *);
251static int
252mac_xmit(struct mbuf *, struct rge_softc *,
253 struct driver_data *, int, struct p2d_tx_desc *);
254static int rmi_xlr_mac_xmit(struct mbuf *, struct rge_softc *, int, struct p2d_tx_desc *);
255static struct rge_softc_stats *rmi_xlr_mac_get_stats(struct rge_softc *sc);
256static void rmi_xlr_mac_set_multicast_list(struct rge_softc *sc);
257static int rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu);
258static int rmi_xlr_mac_fill_rxfr(struct rge_softc *sc);
259static void rmi_xlr_config_spill_area(struct driver_data *priv);
260static int rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed);
261static int
262rmi_xlr_mac_set_duplex(struct driver_data *s,
263 xlr_mac_duplex_t duplex, xlr_mac_fc_t fc);
264static void serdes_regs_init(struct driver_data *priv);
265static int rmi_xlr_gmac_reset(struct driver_data *priv);
266
267/*Statistics...*/
268static int get_p2d_desc_failed = 0;
269static int msg_snd_failed = 0;
270
271SYSCTL_INT(_hw, OID_AUTO, get_p2d_failed, CTLFLAG_RW,
272 &get_p2d_desc_failed, 0, "p2d desc failed");
273SYSCTL_INT(_hw, OID_AUTO, msg_snd_failed, CTLFLAG_RW,
274 &msg_snd_failed, 0, "msg snd failed");
275
276struct callout xlr_tx_stop_bkp;
277
278static device_method_t rge_methods[] = {
279 /* Device interface */
280 DEVMETHOD(device_probe, rge_probe),
281 DEVMETHOD(device_attach, rge_attach),
282 DEVMETHOD(device_detach, rge_detach),
283 DEVMETHOD(device_shutdown, rge_shutdown),
284 DEVMETHOD(device_suspend, rge_suspend),
285 DEVMETHOD(device_resume, rge_resume),
286
287 /* MII interface */
288 DEVMETHOD(miibus_readreg, rge_mii_read),
289 DEVMETHOD(miibus_statchg, rmi_xlr_mac_mii_statchg),
290 DEVMETHOD(miibus_writereg, rge_mii_write),
291 {0, 0}
292};
293
294static driver_t rge_driver = {
295 "rge",
296 rge_methods,
297 sizeof(struct rge_softc)
298};
299
300static devclass_t rge_devclass;
301
302DRIVER_MODULE(rge, iodi, rge_driver, rge_devclass, 0, 0);
303DRIVER_MODULE(miibus, rge, miibus_driver, miibus_devclass, 0, 0);
304
305#ifndef __STR
306#define __STR(x) #x
307#endif
308#ifndef STR
309#define STR(x) __STR(x)
310#endif
311
312void *xlr_tx_ring_mem;
313
314struct tx_desc_node {
315 struct p2d_tx_desc *ptr;
316 TAILQ_ENTRY(tx_desc_node) list;
317};
318
319#define XLR_MAX_TX_DESC_NODES (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT)
320struct tx_desc_node tx_desc_nodes[XLR_MAX_TX_DESC_NODES];
321static volatile int xlr_tot_avail_p2d[XLR_MAX_CORE];
322static int xlr_total_active_core = 0;
323
324/*
325 * This should contain the list of all free tx frag desc nodes pointing to tx
326 * p2d arrays
327 */
328static
329TAILQ_HEAD(, tx_desc_node) tx_frag_desc[XLR_MAX_CORE] =
330{
331 TAILQ_HEAD_INITIALIZER(tx_frag_desc[0]),
332 TAILQ_HEAD_INITIALIZER(tx_frag_desc[1]),
333 TAILQ_HEAD_INITIALIZER(tx_frag_desc[2]),
334 TAILQ_HEAD_INITIALIZER(tx_frag_desc[3]),
335 TAILQ_HEAD_INITIALIZER(tx_frag_desc[4]),
336 TAILQ_HEAD_INITIALIZER(tx_frag_desc[5]),
337 TAILQ_HEAD_INITIALIZER(tx_frag_desc[6]),
338 TAILQ_HEAD_INITIALIZER(tx_frag_desc[7]),
339};
340
341/* This contains a list of free tx frag node descriptors */
342static
343TAILQ_HEAD(, tx_desc_node) free_tx_frag_desc[XLR_MAX_CORE] =
344{
345 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[0]),
346 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[1]),
347 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[2]),
348 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[3]),
349 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[4]),
350 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[5]),
351 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[6]),
352 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[7]),
353};
354
355static struct mtx tx_desc_lock[XLR_MAX_CORE];
356
357static inline void
358mac_make_desc_rfr(struct msgrng_msg *msg,
359 vm_paddr_t addr)
360{
361 msg->msg0 = (uint64_t) addr & 0xffffffffe0ULL;
362 msg->msg1 = msg->msg2 = msg->msg3 = 0;
363}
364
365#define MAC_TX_DESC_ALIGNMENT (XLR_CACHELINE_SIZE - 1)
366
367static void
368init_p2d_allocation(void)
369{
370 int active_core[8] = {0};
371 int i = 0;
372 uint32_t cpumask;
373 int cpu;
374
375 cpumask = xlr_hw_thread_mask;
376
377 for (i = 0; i < 32; i++) {
378 if (cpumask & (1 << i)) {
379 cpu = i;
380 if (!active_core[cpu / 4]) {
381 active_core[cpu / 4] = 1;
382 xlr_total_active_core++;
383 }
384 }
385 }
386 for (i = 0; i < XLR_MAX_CORE; i++) {
387 if (active_core[i])
388 xlr_tot_avail_p2d[i] = XLR_MAX_TX_DESC_NODES / xlr_total_active_core;
389 }
390 printf("Total Active Core %d\n", xlr_total_active_core);
391}
392
393
/*
 * Allocate one physically contiguous chunk for all TX p2d descriptor
 * arrays and partition the descriptor nodes among the per-core
 * tx_frag_desc free lists in equal shares (the share size comes from
 * xlr_total_active_core computed by init_p2d_allocation()).
 */
static void
init_tx_ring(void)
{
	int i;
	int j = 0;
	struct tx_desc_node *start, *node;
	struct p2d_tx_desc *tx_desc;
	vm_paddr_t paddr;
	vm_offset_t unmapped_addr;

	for (i = 0; i < XLR_MAX_CORE; i++)
		mtx_init(&tx_desc_lock[i], "xlr tx_desc", NULL, MTX_SPIN);

	start = &tx_desc_nodes[0];
	/* TODO: try to get this from KSEG0 */
	/*
	 * Contiguous, cacheline-aligned, below 0x10000000 so the
	 * physical address fits the message-ring descriptor format.
	 */
	xlr_tx_ring_mem = contigmalloc((MAX_TX_RING_SIZE + XLR_CACHELINE_SIZE),
	    M_DEVBUF, M_NOWAIT | M_ZERO, 0,
	    0x10000000, XLR_CACHELINE_SIZE, 0);

	if (xlr_tx_ring_mem == NULL) {
		panic("TX ring memory allocation failed");
	}
	paddr = vtophys((vm_offset_t)xlr_tx_ring_mem);

	/* Access the ring through unmapped cached KSEG0. */
	unmapped_addr = MIPS_PHYS_TO_KSEG0(paddr);


	tx_desc = (struct p2d_tx_desc *)unmapped_addr;

	for (i = 0; i < XLR_MAX_TX_DESC_NODES; i++) {
		node = start + i;
		node->ptr = tx_desc;
		tx_desc++;
		TAILQ_INSERT_HEAD(&tx_frag_desc[j], node, list);
		/*
		 * j selects the core list for the NEXT node (it is
		 * updated after the insert), so the first node of each
		 * slice actually lands on the previous core's list.
		 */
		j = (i / (XLR_MAX_TX_DESC_NODES / xlr_total_active_core));
	}
}
431
432static inline struct p2d_tx_desc *
433get_p2d_desc(void)
434{
435 struct tx_desc_node *node;
436 struct p2d_tx_desc *tx_desc = NULL;
437 int cpu = xlr_core_id();
438
439 mtx_lock_spin(&tx_desc_lock[cpu]);
440 node = TAILQ_FIRST(&tx_frag_desc[cpu]);
441 if (node) {
442 xlr_tot_avail_p2d[cpu]--;
443 TAILQ_REMOVE(&tx_frag_desc[cpu], node, list);
444 tx_desc = node->ptr;
445 TAILQ_INSERT_HEAD(&free_tx_frag_desc[cpu], node, list);
446 } else {
447 /* Increment p2d desc fail count */
448 get_p2d_desc_failed++;
449 }
450 mtx_unlock_spin(&tx_desc_lock[cpu]);
451 return tx_desc;
452}
453static void
454free_p2d_desc(struct p2d_tx_desc *tx_desc)
455{
456 struct tx_desc_node *node;
457 int cpu = xlr_core_id();
458
459 mtx_lock_spin(&tx_desc_lock[cpu]);
460 node = TAILQ_FIRST(&free_tx_frag_desc[cpu]);
461 KASSERT((node != NULL), ("Free TX frag node list is empty\n"));
462
463 TAILQ_REMOVE(&free_tx_frag_desc[cpu], node, list);
464 node->ptr = tx_desc;
465 TAILQ_INSERT_HEAD(&tx_frag_desc[cpu], node, list);
466 xlr_tot_avail_p2d[cpu]++;
467 mtx_unlock_spin(&tx_desc_lock[cpu]);
468
469}
470
/*
 * Convert an mbuf chain into a P2D fragment list for the MAC.  Each
 * frag word packs a station id (bits 54+), a length (bits 40+) and the
 * physical address.  An mbuf whose data crosses a physical page
 * boundary (detected by comparing the two vtophys() translations) is
 * split into two frags; more than one crossing per mbuf is fatal.
 * On success the p2p message descriptor for the whole list is written
 * to *p2p_msg and 0 is returned.  Returns 1 (freeing tx_desc) if
 * tx_desc is NULL or the chain needs too many fragments.
 */
static int
build_frag_list(struct mbuf *m_head, struct msgrng_msg *p2p_msg, struct p2d_tx_desc *tx_desc)
{
	struct mbuf *m;
	vm_paddr_t paddr;
	uint64_t p2d_len;
	int nfrag;
	vm_paddr_t p1, p2;
	uint32_t len1, len2;
	vm_offset_t taddr;
	uint64_t fr_stid;

	/* Free-back station id: tx-done messages return to this thread. */
	fr_stid = (xlr_core_id() << 3) + xlr_thr_id() + 4;

	if (tx_desc == NULL)
		return 1;

	nfrag = 0;
	for (m = m_head; m != NULL; m = m->m_next) {
		/* keep one slot free for the trailing free-back frag */
		if ((nfrag + 1) >= XLR_MAX_TX_FRAGS) {
			free_p2d_desc(tx_desc);
			return 1;
		}
		if (m->m_len != 0) {
			paddr = vtophys(mtod(m, vm_offset_t));
			p1 = paddr + m->m_len;
			p2 = vtophys(((vm_offset_t)m->m_data + m->m_len));
			if (p1 != p2) {
				/*
				 * Data spans a page boundary: first frag
				 * covers up to the end of the first page...
				 */
				len1 = (uint32_t)
				    (PAGE_SIZE - (paddr & PAGE_MASK));
				tx_desc->frag[nfrag] = (127ULL << 54) |
				    ((uint64_t) len1 << 40) | paddr;
				nfrag++;
				taddr = (vm_offset_t)m->m_data + len1;
				p2 = vtophys(taddr);
				len2 = m->m_len - len1;
				if (len2 == 0)
					continue;
				if (nfrag >= XLR_MAX_TX_FRAGS)
					panic("TX frags exceeded");

				/* ...second frag covers the remainder. */
				tx_desc->frag[nfrag] = (127ULL << 54) |
				    ((uint64_t) len2 << 40) | p2;

				taddr += len2;
				p1 = vtophys(taddr);

				/* a second page crossing is unsupported */
				if ((p2 + len2) != p1) {
					printf("p1 = %p p2 = %p\n", (void *)p1, (void *)p2);
					printf("len1 = %x len2 = %x\n", len1,
					    len2);
					printf("m_data %p\n", m->m_data);
					DELAY(1000000);
					panic("Multiple Mbuf segment discontiguous\n");
				}
			} else {
				tx_desc->frag[nfrag] = (127ULL << 54) |
				    ((uint64_t) m->m_len << 40) | paddr;
			}
			nfrag++;
		}
	}
	/* set eop in the last tx p2d desc */
	tx_desc->frag[nfrag - 1] |= (1ULL << 63);
	paddr = vtophys((vm_offset_t)tx_desc);
	/* trailing free-back frag so the MAC returns the descriptor */
	tx_desc->frag[nfrag] = (1ULL << 63) | (fr_stid << 54) | paddr;
	nfrag++;
	/* stash self/mbuf pointers for release_tx_desc() to validate/free */
	tx_desc->frag[XLR_MAX_TX_FRAGS] = (uint64_t)(intptr_t)tx_desc;
	tx_desc->frag[XLR_MAX_TX_FRAGS + 1] = (uint64_t)(intptr_t)m_head;

	p2d_len = (nfrag * 8);
	p2p_msg->msg0 = (1ULL << 63) | (1ULL << 62) | (127ULL << 54) |
	    (p2d_len << 40) | paddr;

	return 0;
}
547static void
548release_tx_desc(struct msgrng_msg *msg, int rel_buf)
549{
550 struct p2d_tx_desc *tx_desc, *chk_addr;
551 struct mbuf *m;
552
553 tx_desc = (struct p2d_tx_desc *)MIPS_PHYS_TO_KSEG0(msg->msg0);
554 chk_addr = (struct p2d_tx_desc *)(intptr_t)tx_desc->frag[XLR_MAX_TX_FRAGS];
555 if (tx_desc != chk_addr) {
556 printf("Address %p does not match with stored addr %p - we leaked a descriptor\n",
557 tx_desc, chk_addr);
558 return;
559 }
560 if (rel_buf) {
561 m = (struct mbuf *)(intptr_t)tx_desc->frag[XLR_MAX_TX_FRAGS + 1];
562 m_freem(m);
563 }
564 free_p2d_desc(tx_desc);
565}
566
567
568static struct mbuf *
569get_mbuf(void)
570{
571 struct mbuf *m_new = NULL;
572
573 if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
574 return NULL;
575
576 m_new->m_len = MCLBYTES;
577 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
578 return m_new;
579}
580
/*
 * Free a receive buffer handed back by the MAC.  get_buf() stashed the
 * owning mbuf pointer and a 0xf00bad magic word in the cacheline just
 * before the buffer; read them back with 64-bit physical loads (KX
 * must be enabled around xlr_paddr_ld()).
 */
static void
free_buf(vm_paddr_t paddr)
{
	struct mbuf *m;
	uint64_t mag;
	uint32_t sr;

	sr = xlr_enable_kx();
	m = (struct mbuf *)(intptr_t)xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
	mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
	xlr_restore_kx(sr);
	if (mag != 0xf00bad) {
		/* metadata corrupted: leak rather than free a bogus pointer */
		printf("Something is wrong kseg:%lx found mag:%lx not 0xf00bad\n",
		    (u_long)paddr, (u_long)mag);
		return;
	}
	if (m != NULL)
		m_freem(m);
}
600
/*
 * Allocate a cacheline-aligned receive buffer for the MAC free-in
 * ring.  The cacheline immediately preceding the returned pointer
 * holds the owning mbuf pointer and the 0xf00bad magic that free_buf()
 * validates.  Returns NULL if no mbuf is available.
 */
static void *
get_buf(void)
{
	struct mbuf *m_new = NULL;
	uint64_t *md;
#ifdef INVARIANTS
	vm_paddr_t temp1, temp2;
#endif

	m_new = get_mbuf();
	if (m_new == NULL)
		return NULL;

	/* align m_data to the next cacheline (0x1f mask assumes 32-byte
	 * XLR_CACHELINE_SIZE) */
	m_adj(m_new, XLR_CACHELINE_SIZE - ((uintptr_t)m_new->m_data & 0x1f));
	md = (uint64_t *)m_new->m_data;
	md[0] = (uintptr_t)m_new;	/* Back Ptr */
	md[1] = 0xf00bad;		/* magic checked by free_buf() */
	m_adj(m_new, XLR_CACHELINE_SIZE);

#ifdef INVARIANTS
	/* cluster data must be physically contiguous for the MAC DMA */
	temp1 = vtophys((vm_offset_t)m_new->m_data);
	temp2 = vtophys((vm_offset_t)m_new->m_data + 1536);
	if ((temp1 + 1536) != temp2)
		panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
#endif
	return (void *)m_new->m_data;
}
628
629/**********************************************************************
630 **********************************************************************/
/*
 * Enable (flag != 0) or disable (flag == 0) the MAC data path: the TX
 * engine with its byte threshold, the core RX engine (plus RGMII mode
 * for port 0 when configured), and the MAC_CONFIG_1 tx/rx enables.
 */
static void
rmi_xlr_mac_set_enable(struct driver_data *priv, int flag)
{
	uint32_t regval;
	int tx_threshold = 1518;	/* standard max Ethernet frame size */

	if (flag) {
		regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
		regval |= (1 << O_TX_CONTROL__TxEnable) |
		    (tx_threshold << O_TX_CONTROL__TxThreshold);

		xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
		regval |= 1 << O_RX_CONTROL__RxEnable;
		if (priv->mode == XLR_PORT0_RGMII)
			regval |= 1 << O_RX_CONTROL__RGMII;
		xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
		regval |= (O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
		xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
	} else {
		regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
		/* NOTE(review): this also clears the threshold bits, not
		 * just the enable bit - presumably fine because the
		 * enable path rewrites them; confirm against the PRM. */
		regval &= ~((1 << O_TX_CONTROL__TxEnable) |
		    (tx_threshold << O_TX_CONTROL__TxThreshold));

		xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
		regval &= ~(1 << O_RX_CONTROL__RxEnable);
		xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
		regval &= ~(O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
		xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
	}
}
669
670/**********************************************************************
671 **********************************************************************/
/*
 * Hand a free receive buffer back to the MAC's free-in bucket.  Spins
 * until message_send() succeeds; under INVARIANTS the KASSERT trips
 * after 100000 failed credit attempts (both the counter and the assert
 * compile away otherwise).  'len' is currently unused.  Returns 0.
 */
static __inline__ int
xlr_mac_send_fr(struct driver_data *priv,
    vm_paddr_t addr, int len)
{
	struct msgrng_msg msg;
	int stid = priv->rfrbucket;
	int code, ret;
	uint32_t msgrng_flags;
#ifdef INVARIANTS
	int i = 0;
#endif

	mac_make_desc_rfr(&msg, addr);

	/* Send the packet to MAC */
	dbg_msg("mac_%d: Sending free packet %lx to stid %d\n",
	    priv->instance, (u_long)addr, stid);
	if (priv->type == XLR_XGMAC)
		code = MSGRNG_CODE_XGMAC;	/* WHY? */
	else
		code = MSGRNG_CODE_MAC;

	do {
		/* message-ring access must be wrapped in enable/restore */
		msgrng_flags = msgrng_access_enable();
		ret = message_send(1, code, stid, &msg);
		msgrng_restore(msgrng_flags);
		KASSERT(i++ < 100000, ("Too many credit fails\n"));
	} while (ret != 0);

	return 0;
}
703
704/**************************************************************/
705
/*
 * Clear the isolate bit (0x2000) in XGMAC MDIO registers 0x8000-0x8003
 * so the four lanes are taken out of electrical isolation.
 */
static void
xgmac_mdio_setup(volatile unsigned int *_mmio)
{
	uint32_t val;
	int lane;

	for (lane = 0; lane < 4; lane++) {
		val = xmdio_read(_mmio, 1, 0x8000 + lane);
		/* ~0x2000U == 0xffffdfff: drop the isolate bit */
		xmdio_write(_mmio, 1, 0x8000 + lane, val & ~0x2000U);
	}
}
718
719/**********************************************************************
720 * Init MII interface
721 *
722 * Input parameters:
723 * s - priv structure
724 ********************************************************************* */
725#define PHY_STATUS_RETRIES 25000
726
/*
 * Program the MII management clock configuration register; per the
 * original comment, 0x07 selects the divide-by-28 MDC clock.
 */
static void
rmi_xlr_mac_mii_init(struct driver_data *priv)
{
	xlr_reg_t *mii_mmio = priv->mii_mmio;

	/* use the lowest clock divisor - divisor 28 */
	xlr_write_reg(mii_mmio, R_MII_MGMT_CONFIG, 0x07);
}
735
736/**********************************************************************
737 * Read a PHY register.
738 *
739 * Input parameters:
740 * s - priv structure
741 * phyaddr - PHY's address
742 * regidx = index of register to read
743 *
744 * Return value:
 * value read, or 0xffffffff if the management cycle timed out.
746 ********************************************************************* */
747
/*
 * Read PHY register 'regidx' of PHY 'phyaddr' over the MII management
 * interface.  Returns the register value, or 0xffffffff if the
 * management cycle does not complete within PHY_STATUS_RETRIES polls.
 */
static int
rge_mii_read_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx)
{
	int i = 0;

	/* setup the phy reg to be used */
	xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
	    (phyaddr << 8) | (regidx << 0));
	/* Issue the read command */
	xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND,
	    (1 << O_MII_MGMT_COMMAND__rstat));

	/* poll for the read cycle to complete */
	for (i = 0; i < PHY_STATUS_RETRIES; i++) {
		if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
			break;
	}

	/* clear the read cycle */
	xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND, 0);

	if (i == PHY_STATUS_RETRIES) {
		/* timed out: all-ones, the MII "no device" pattern */
		return 0xffffffff;
	}
	/* Read the data back */
	return xlr_read_reg(mii_mmio, R_MII_MGMT_STATUS);
}
775
776static int
777rge_mii_read(device_t dev, int phyaddr, int regidx)
778{
779 struct rge_softc *sc = device_get_softc(dev);
780
781 return rge_mii_read_internal(sc->priv.mii_mmio, phyaddr, regidx);
782}
783
784/**********************************************************************
785 * Set MII hooks to newly selected media
786 *
787 * Input parameters:
788 * ifp - Interface Pointer
789 *
790 * Return value:
791 * nothing
792 ********************************************************************* */
793static int
794rmi_xlr_mac_mediachange(struct ifnet *ifp)
795{
796 struct rge_softc *sc = ifp->if_softc;
797
798 if (ifp->if_flags & IFF_UP)
799 mii_mediachg(&sc->rge_mii);
800
801 return 0;
802}
803
804/**********************************************************************
805 * Get the current interface media status
806 *
807 * Input parameters:
808 * ifp - Interface Pointer
809 * ifmr - Interface media request ptr
810 *
811 * Return value:
812 * nothing
813 ********************************************************************* */
/*
 * ifmedia status callback: report link validity/activity based on the
 * driver's cached sc->link_up flag.
 */
static void
rmi_xlr_mac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;

	/* Check whether this interface is active or not. */
	ifmr->ifm_status = IFM_AVALID;
	if (sc->link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
	} else {
		/* NOTE(review): ifm_active is only filled in when the
		 * link is DOWN, and never carries the negotiated media
		 * when it is up - looks inverted relative to the usual
		 * ifmedia(9) contract; confirm before relying on it. */
		ifmr->ifm_active = IFM_ETHER;
	}
}
827
828/**********************************************************************
829 * Write a value to a PHY register.
830 *
831 * Input parameters:
832 * s - priv structure
833 * phyaddr - PHY to use
834 * regidx - register within the PHY
835 * regval - data to write to register
836 *
837 * Return value:
838 * nothing
839 ********************************************************************* */
/*
 * Write 'regval' to PHY register 'regidx' of PHY 'phyaddr' over the
 * MII management interface.  Polls up to PHY_STATUS_RETRIES for the
 * cycle to finish; a timeout is silently ignored.
 */
static void
rge_mii_write_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx, int regval)
{
	int i = 0;

	/* select the PHY/register pair for the management cycle */
	xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
	    (phyaddr << 8) | (regidx << 0));

	/* Write the data which starts the write cycle */
	xlr_write_reg(mii_mmio, R_MII_MGMT_WRITE_DATA, regval);

	/* poll for the write cycle to complete */
	for (i = 0; i < PHY_STATUS_RETRIES; i++) {
		if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
			break;
	}

	return;
}
859
860static int
861rge_mii_write(device_t dev, int phyaddr, int regidx, int regval)
862{
863 struct rge_softc *sc = device_get_softc(dev);
864
865 rge_mii_write_internal(sc->priv.mii_mmio, phyaddr, regidx, regval);
866 return (0);
867}
868
/*
 * miibus statchg method: intentionally empty - this driver tracks
 * speed/link itself (see rmi_xlr_gmac_config_speed()).
 */
static void
rmi_xlr_mac_mii_statchg(struct device *dev)
{
}
873
874static void
875serdes_regs_init(struct driver_data *priv)
876{
877 xlr_reg_t *mmio_gpio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GPIO_OFFSET);
878
879 /* Initialize SERDES CONTROL Registers */
880 rge_mii_write_internal(priv->serdes_mmio, 26, 0, 0x6DB0);
881 rge_mii_write_internal(priv->serdes_mmio, 26, 1, 0xFFFF);
882 rge_mii_write_internal(priv->serdes_mmio, 26, 2, 0xB6D0);
883 rge_mii_write_internal(priv->serdes_mmio, 26, 3, 0x00FF);
884 rge_mii_write_internal(priv->serdes_mmio, 26, 4, 0x0000);
885 rge_mii_write_internal(priv->serdes_mmio, 26, 5, 0x0000);
886 rge_mii_write_internal(priv->serdes_mmio, 26, 6, 0x0005);
887 rge_mii_write_internal(priv->serdes_mmio, 26, 7, 0x0001);
888 rge_mii_write_internal(priv->serdes_mmio, 26, 8, 0x0000);
889 rge_mii_write_internal(priv->serdes_mmio, 26, 9, 0x0000);
890 rge_mii_write_internal(priv->serdes_mmio, 26, 10, 0x0000);
891
892 /*
893 * GPIO setting which affect the serdes - needs figuring out
894 */
895 DELAY(100);
896 xlr_write_reg(mmio_gpio, 0x20, 0x7e6802);
897 xlr_write_reg(mmio_gpio, 0x10, 0x7104);
898 DELAY(100);
899
900 /*
901 * This kludge is needed to setup serdes (?) clock correctly on some
902 * XLS boards
903 */
904 if ((xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI ||
905 xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XII) &&
906 xlr_boot1_info.board_minor_version == 4) {
907 /* use 125 Mhz instead of 156.25Mhz ref clock */
908 DELAY(100);
909 xlr_write_reg(mmio_gpio, 0x10, 0x7103);
910 xlr_write_reg(mmio_gpio, 0x21, 0x7103);
911 DELAY(100);
912 }
913
914 return;
915}
916
917static void
918serdes_autoconfig(struct driver_data *priv)
919{
920 int delay = 100000;
921
922 /* Enable Auto negotiation in the PCS Layer */
923 rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x1000);
924 DELAY(delay);
925 rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x0200);
926 DELAY(delay);
927
928 rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x1000);
929 DELAY(delay);
930 rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x0200);
931 DELAY(delay);
932
933 rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x1000);
934 DELAY(delay);
935 rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x0200);
936 DELAY(delay);
937
938 rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x1000);
939 DELAY(delay);
940 rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x0200);
941 DELAY(delay);
942
943}
944
945/*****************************************************************
946 * Initialize GMAC
947 *****************************************************************/
948static void
949rmi_xlr_config_pde(struct driver_data *priv)
950{
951 int i = 0, cpu = 0, bucket = 0;
952 uint64_t bucket_map = 0;
953
954 /* uint32_t desc_pack_ctrl = 0; */
955 uint32_t cpumask;
956
957 cpumask = 0x1;
958#ifdef SMP
959 /*
960 * rge may be called before SMP start in a BOOTP/NFSROOT
961 * setup. we will distribute packets to other cpus only when
962 * the SMP is started.
963 */
964 if (smp_started)
965 cpumask = xlr_hw_thread_mask;
966#endif
967
968 for (i = 0; i < MAXCPU; i++) {
969 if (cpumask & (1 << i)) {
970 cpu = i;
971 bucket = ((cpu >> 2) << 3);
972 bucket_map |= (3ULL << bucket);
973 }
974 }
975 printf("rmi_xlr_config_pde: bucket_map=%jx\n", (uintmax_t)bucket_map);
976
977 /* bucket_map = 0x1; */
978 xlr_write_reg(priv->mmio, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
979 xlr_write_reg(priv->mmio, R_PDE_CLASS_0 + 1,
980 ((bucket_map >> 32) & 0xffffffff));
981
982 xlr_write_reg(priv->mmio, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
983 xlr_write_reg(priv->mmio, R_PDE_CLASS_1 + 1,
984 ((bucket_map >> 32) & 0xffffffff));
985
986 xlr_write_reg(priv->mmio, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
987 xlr_write_reg(priv->mmio, R_PDE_CLASS_2 + 1,
988 ((bucket_map >> 32) & 0xffffffff));
989
990 xlr_write_reg(priv->mmio, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
991 xlr_write_reg(priv->mmio, R_PDE_CLASS_3 + 1,
992 ((bucket_map >> 32) & 0xffffffff));
993}
994
995static void
996rge_smp_update_pde(void *dummy __unused)
997{
998 int i;
999 struct driver_data *priv;
1000 struct rge_softc *sc;
1001
1002 printf("Updating packet distribution for SMP\n");
1003 for (i = 0; i < XLR_MAX_MACS; i++) {
1004 sc = dev_mac[i];
1005 if (!sc)
1006 continue;
1007 priv = &(sc->priv);
1008 rmi_xlr_mac_set_enable(priv, 0);
1009 rmi_xlr_config_pde(priv);
1010 rmi_xlr_mac_set_enable(priv, 1);
1011 }
1012}
1013
1014SYSINIT(rge_smp_update_pde, SI_SUB_SMP, SI_ORDER_ANY, rge_smp_update_pde, NULL);
1015
1016
/*
 * Configure the packet parser: disable classification and program the
 * L3 extraction table to pull the IPv4 (ethertype 0x0800) source,
 * destination and protocol fields.
 */
static void
rmi_xlr_config_parser(struct driver_data *priv)
{
	/*
	 * Mark it as no classification.  The parser extract is guaranteed
	 * to be zero with no classification.
	 */
	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x00);

	/* NOTE(review): this immediately overwrites the 0x00 written
	 * above - either a required two-step hardware sequence or the
	 * first write is dead; confirm against the XLR PRM. */
	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x01);

	/* configure the parser : L2 Type is configured in the bootloader */
	/* extract IP: src, dest protocol */
	xlr_write_reg(priv->mmio, R_L3CTABLE,
	    (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
	    (0x0800 << 0));
	xlr_write_reg(priv->mmio, R_L3CTABLE + 1,
	    (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));

}
1037
1038static void
1039rmi_xlr_config_classifier(struct driver_data *priv)
1040{
1041 int i = 0;
1042
1043 if (priv->type == XLR_XGMAC) {
1044 /* xgmac translation table doesn't have sane values on reset */
1045 for (i = 0; i < 64; i++)
1046 xlr_write_reg(priv->mmio, R_TRANSLATETABLE + i, 0x0);
1047
1048 /*
1049 * use upper 7 bits of the parser extract to index the
1050 * translate table
1051 */
1052 xlr_write_reg(priv->mmio, R_PARSERCONFIGREG, 0x0);
1053 }
1054}
1055
1056enum {
1057 SGMII_SPEED_10 = 0x00000000,
1058 SGMII_SPEED_100 = 0x02000000,
1059 SGMII_SPEED_1000 = 0x04000000,
1060};
1061
1062static void
1063rmi_xlr_gmac_config_speed(struct driver_data *priv)
1064{
1065 int phy_addr = priv->phy_addr;
1066 xlr_reg_t *mmio = priv->mmio;
1067 struct rge_softc *sc = priv->sc;
1068
1069 priv->speed = rge_mii_read_internal(priv->mii_mmio, phy_addr, 28);
1070 priv->link = rge_mii_read_internal(priv->mii_mmio, phy_addr, 1) & 0x4;
1071 priv->speed = (priv->speed >> 3) & 0x03;
1072
1073 if (priv->speed == xlr_mac_speed_10) {
1074 if (priv->mode != XLR_RGMII)
1075 xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_10);
1076 xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1077 xlr_write_reg(mmio, R_CORECONTROL, 0x02);
1078 printf("%s: [10Mbps]\n", device_get_nameunit(sc->rge_dev));
1079 sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1080 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1081 sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1082 } else if (priv->speed == xlr_mac_speed_100) {
1083 if (priv->mode != XLR_RGMII)
1084 xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
1085 xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1086 xlr_write_reg(mmio, R_CORECONTROL, 0x01);
1087 printf("%s: [100Mbps]\n", device_get_nameunit(sc->rge_dev));
1088 sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1089 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1090 sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1091 } else {
1092 if (priv->speed != xlr_mac_speed_1000) {
1093 if (priv->mode != XLR_RGMII)
1094 xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
1095 printf("PHY reported unknown MAC speed, defaulting to 100Mbps\n");
1096 xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1097 xlr_write_reg(mmio, R_CORECONTROL, 0x01);
1098 sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1099 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1100 sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1101 } else {
1102 if (priv->mode != XLR_RGMII)
1103 xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_1000);
1104 xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7217);
1105 xlr_write_reg(mmio, R_CORECONTROL, 0x00);
1106 printf("%s: [1000Mbps]\n", device_get_nameunit(sc->rge_dev));
1107 sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1108 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1109 sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1110 }
1111 }
1112
1113 if (!priv->link) {
1114 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER;
1115 sc->link_up = 0;
1116 } else {
1117 sc->link_up = 1;
1118 }
1119}
1120
1121/*****************************************************************
1122 * Initialize XGMAC
1123 *****************************************************************/
/*
 * One-time init of an XGMAC port: descriptor packing, PDE, parser and
 * classifier setup, XGMAC and glue register programming, releasing the
 * XGMII PHY from reset through the board CPLD, spill-area setup, and
 * per-instance (0 or 1) message-ring bucket sizes and credit counters.
 * Finally publishes 10GbE media state and requests free-in descriptor
 * initialization.
 */
static void
rmi_xlr_xgmac_init(struct driver_data *priv)
{
	int i = 0;
	xlr_reg_t *mmio = priv->mmio;
	int id = priv->instance;
	struct rge_softc *sc = priv->sc;
	volatile unsigned short *cpld;

	/* board CPLD at a hard-wired uncached (KSEG1) address */
	cpld = (volatile unsigned short *)0xBD840000;

	xlr_write_reg(priv->mmio, R_DESC_PACK_CTRL,
	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize) | (4 << 20));
	xlr_write_reg(priv->mmio, R_BYTEOFFSET0, BYTE_OFFSET);
	rmi_xlr_config_pde(priv);
	rmi_xlr_config_parser(priv);
	rmi_xlr_config_classifier(priv);

	xlr_write_reg(priv->mmio, R_MSG_TX_THRESHOLD, 1);

	/* configure the XGMAC Registers */
	xlr_write_reg(mmio, R_XGMAC_CONFIG_1, 0x50000026);

	/* configure the XGMAC_GLUE Registers */
	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
	xlr_write_reg(mmio, R_STATCTRL, 0x04);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);

	xlr_write_reg(mmio, R_XGMACPADCALIBRATION, 0x030);
	xlr_write_reg(mmio, R_EGRESSFIFOCARVINGSLOTS, 0x0f);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
	xlr_write_reg(mmio, R_XGMAC_MIIM_CONFIG, 0x3e);

	/*
	 * take XGMII phy out of reset
	 */
	/*
	 * we are pulling everything out of reset because writing a 0 would
	 * reset other devices on the chip
	 */
	cpld[ATX_CPLD_RESET_1] = 0xffff;
	cpld[ATX_CPLD_MISC_CTRL] = 0xffff;
	cpld[ATX_CPLD_RESET_2] = 0xffff;

	xgmac_mdio_setup(mmio);

	rmi_xlr_config_spill_area(priv);

	/* per-instance message-ring bucket sizes and credit counters */
	if (id == 0) {
		for (i = 0; i < 16; i++) {
			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
			    bucket_sizes.
			    bucket[MSGRNG_STNID_XGS0_TX + i]);
		}

		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0JFR]);
		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0RFR]);

		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
			    cc_table_xgs_0.
			    counters[i >> 3][i & 0x07]);
		}
	} else if (id == 1) {
		for (i = 0; i < 16; i++) {
			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
			    bucket_sizes.
			    bucket[MSGRNG_STNID_XGS1_TX + i]);
		}

		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1JFR]);
		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1RFR]);

		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
			    cc_table_xgs_1.
			    counters[i >> 3][i & 0x07]);
		}
	}
	/* XGMAC has no autonegotiation: report fixed 10G-SR full duplex */
	sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media.ifm_media |= (IFM_AVALID | IFM_ACTIVE);
	sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media.ifm_cur->ifm_media |= (IFM_AVALID | IFM_ACTIVE);

	priv->init_frin_desc = 1;
}
1218
1219/*******************************************************
1220 * Initialization gmac
1221 *******************************************************/
1222static int
1223rmi_xlr_gmac_reset(struct driver_data *priv)
1224{
1225 volatile uint32_t val;
1226 xlr_reg_t *mmio = priv->mmio;
1227 int i, maxloops = 100;
1228
1229 /* Disable MAC RX */
1230 val = xlr_read_reg(mmio, R_MAC_CONFIG_1);
1231 val &= ~0x4;
1232 xlr_write_reg(mmio, R_MAC_CONFIG_1, val);
1233
1234 /* Disable Core RX */
1235 val = xlr_read_reg(mmio, R_RX_CONTROL);
1236 val &= ~0x1;
1237 xlr_write_reg(mmio, R_RX_CONTROL, val);
1238
1239 /* wait for rx to halt */
1240 for (i = 0; i < maxloops; i++) {
1241 val = xlr_read_reg(mmio, R_RX_CONTROL);
1242 if (val & 0x2)
1243 break;
1244 DELAY(1000);
1245 }
1246 if (i == maxloops)
1247 return -1;
1248
1249 /* Issue a soft reset */
1250 val = xlr_read_reg(mmio, R_RX_CONTROL);
1251 val |= 0x4;
1252 xlr_write_reg(mmio, R_RX_CONTROL, val);
1253
1254 /* wait for reset to complete */
1255 for (i = 0; i < maxloops; i++) {
1256 val = xlr_read_reg(mmio, R_RX_CONTROL);
1257 if (val & 0x8)
1258 break;
1259 DELAY(1000);
1260 }
1261 if (i == maxloops)
1262 return -1;
1263
1264 /* Clear the soft reset bit */
1265 val = xlr_read_reg(mmio, R_RX_CONTROL);
1266 val &= ~0x4;
1267 xlr_write_reg(mmio, R_RX_CONTROL, val);
1268 return 0;
1269}
1270
/*
 * One-time initialization of a GMAC port: descriptor packing, PDE /
 * parser / classifier setup, MII and serdes bring-up, and programming
 * of the message-ring bucket sizes and credit counters for this
 * port's block.
 */
static void
rmi_xlr_gmac_init(struct driver_data *priv)
{
	int i = 0;
	xlr_reg_t *mmio = priv->mmio;
	int id = priv->instance;
	struct stn_cc *gmac_cc_config;
	uint32_t value = 0;
	/* Four GMAC ports per block; some registers are per-port. */
	int blk = id / 4, port = id % 4;

	rmi_xlr_mac_set_enable(priv, 0);

	rmi_xlr_config_spill_area(priv);

	xlr_write_reg(mmio, R_DESC_PACK_CTRL,
	    (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset) |
	    (1 << O_DESC_PACK_CTRL__MaxEntry) |
	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));

	rmi_xlr_config_pde(priv);
	rmi_xlr_config_parser(priv);
	rmi_xlr_config_classifier(priv);

	xlr_write_reg(mmio, R_MSG_TX_THRESHOLD, 3);
	xlr_write_reg(mmio, R_MAC_CONFIG_1, 0x35);
	xlr_write_reg(mmio, R_RX_CONTROL, (0x7 << 6));

	if (priv->mode == XLR_PORT0_RGMII) {
		printf("Port 0 set in RGMII mode\n");
		value = xlr_read_reg(mmio, R_RX_CONTROL);
		value |= 1 << O_RX_CONTROL__RGMII;
		xlr_write_reg(mmio, R_RX_CONTROL, value);
	}
	rmi_xlr_mac_mii_init(priv);


#if 0
	priv->advertising = ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half |
	    ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half |
	    ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
	    ADVERTISED_MII;
#endif

	/*
	 * Enable all MDIO interrupts in the phy RX_ER bit seems to be get
	 * set about every 1 sec in GigE mode, ignore it for now...
	 */
	rge_mii_write_internal(priv->mii_mmio, priv->phy_addr, 25, 0xfffffffe);

	/* Non-RGMII (SGMII) ports need the serdes lanes configured. */
	if (priv->mode != XLR_RGMII) {
		serdes_regs_init(priv);
		serdes_autoconfig(priv);
	}
	rmi_xlr_gmac_config_speed(priv);

	/* Set the back-to-back IPG field, preserving the other bits. */
	value = xlr_read_reg(mmio, R_IPG_IFG);
	xlr_write_reg(mmio, R_IPG_IFG, ((value & ~0x7f) | MAC_B2B_IPG));
	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
	xlr_write_reg(mmio, R_STATCTRL, 0x04);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
	xlr_write_reg(mmio, R_INTMASK, 0);
	xlr_write_reg(mmio, R_FREEQCARVE, 0);

	/* Program bucket sizes for this port's tx and the free-in rings. */
	xlr_write_reg(mmio, R_GMAC_TX0_BUCKET_SIZE + port,
	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
	xlr_write_reg(mmio, R_GMAC_JFR0_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
	xlr_write_reg(mmio, R_GMAC_RFR0_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
	xlr_write_reg(mmio, R_GMAC_JFR1_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
	xlr_write_reg(mmio, R_GMAC_RFR1_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);

	dbg_msg("Programming credit counter %d : %d -> %d\n", blk, R_GMAC_TX0_BUCKET_SIZE + port,
	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);

	/* Credit counters are laid out 8 per row in the config table. */
	gmac_cc_config = xlr_board_info.gmac_block[blk].credit_config;
	for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
		xlr_write_reg(mmio, R_CC_CPU0_0 + i,
		    gmac_cc_config->counters[i >> 3][i & 0x07]);
		dbg_msg("%d: %d -> %d\n", priv->instance,
		    R_CC_CPU0_0 + i, gmac_cc_config->counters[i >> 3][i & 0x07]);
	}
	/* Ask the open path to (re)fill the free-in descriptor ring. */
	priv->init_frin_desc = 1;
}
1360
1361/**********************************************************************
1362 * Set promiscuous mode
1363 **********************************************************************/
1364static void
1365xlr_mac_set_rx_mode(struct rge_softc *sc)
1366{
1367 struct driver_data *priv = &(sc->priv);
1368 uint32_t regval;
1369
1370 regval = xlr_read_reg(priv->mmio, R_MAC_FILTER_CONFIG);
1371
1372 if (sc->flags & IFF_PROMISC) {
1373 regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1374 (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1375 (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1376 (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
1377 } else {
1378 regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1379 (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
1380 }
1381
1382 xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG, regval);
1383}
1384
1385/**********************************************************************
1386 * Configure LAN speed for the specified MAC.
1387 ********************************************************************* */
/*
 * Set the LAN speed for this MAC.  Stub: nothing is programmed here
 * and success (0) is always returned.
 */
static int
rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed)
{
	return 0;
}
1393
1394/**********************************************************************
1395 * Set Ethernet duplex and flow control options for this MAC
1396 ********************************************************************* */
/*
 * Set duplex and flow-control options for this MAC.  Stub: nothing is
 * programmed here and success (0) is always returned.
 */
static int
rmi_xlr_mac_set_duplex(struct driver_data *s,
    xlr_mac_duplex_t duplex, xlr_mac_fc_t fc)
{
	return 0;
}
1403
1404/*****************************************************************
1405 * Kernel Net Stack <-> MAC Driver Interface
1406 *****************************************************************/
1407/**********************************************************************
1408 **********************************************************************/
/* Return codes used by mac_xmit()/rmi_xlr_mac_xmit(). */
#define MAC_TX_FAIL 2
#define MAC_TX_PASS 0
#define MAC_TX_RETRY 1

/* NOTE(review): appears unused in this file -- confirm before removal. */
int xlr_dev_queue_xmit_hack = 0;
1414
/*
 * Hand one mbuf chain to the MAC via the fast message ring.  Returns
 * MAC_TX_PASS on success; MAC_TX_FAIL when the fragment list cannot
 * be built or the message send fails (ring full), in which case the
 * tx descriptor is released before returning.
 */
static int
mac_xmit(struct mbuf *m, struct rge_softc *sc,
    struct driver_data *priv, int len, struct p2d_tx_desc *tx_desc)
{
	struct msgrng_msg msg = {0,0,0,0};
	int stid = priv->txbucket;
	/* NOTE(review): tx_cycles is sampled but never used -- confirm. */
	uint32_t tx_cycles = 0;
	uint32_t mflags;
	int vcpu = xlr_cpu_id();
	int rv;

	tx_cycles = mips_rd_count();

	/* Describe the mbuf chain as a p2d fragment list in 'msg'. */
	if (build_frag_list(m, &msg, tx_desc) != 0)
		return MAC_TX_FAIL;

	else {
		mflags = msgrng_access_enable();
		if ((rv = message_send(1, MSGRNG_CODE_MAC, stid, &msg)) != 0) {
			/* Send failed: reclaim the descriptor and count it. */
			msg_snd_failed++;
			msgrng_restore(mflags);
			release_tx_desc(&msg, 0);
			xlr_rge_msg_snd_failed[vcpu]++;
			dbg_msg("Failed packet to cpu %d, rv = %d, stid %d, msg0=%jx\n",
			    vcpu, rv, stid, (uintmax_t)msg.msg0);
			return MAC_TX_FAIL;
		}
		msgrng_restore(mflags);
		port_inc_counter(priv->instance, PORT_TX);
	}

	/* Send the packet to MAC */
	dbg_msg("Sent tx packet to stid %d, msg0=%jx, msg1=%jx \n", stid,
	    (uintmax_t)msg.msg0, (uintmax_t)msg.msg1);
#ifdef DUMP_PACKETS
	{
		int i = 0;
		unsigned char *buf = (char *)m->m_data;

		printf("Tx Packet: length=%d\n", len);
		for (i = 0; i < 64; i++) {
			if (i && (i % 16) == 0)
				printf("\n");
			printf("%02x ", buf[i]);
		}
		printf("\n");
	}
#endif
	xlr_inc_counter(NETIF_TX);
	return MAC_TX_PASS;
}
1466
1467static int
1468rmi_xlr_mac_xmit(struct mbuf *m, struct rge_softc *sc, int len, struct p2d_tx_desc *tx_desc)
1469{
1470 struct driver_data *priv = &(sc->priv);
1471 int ret = -ENOSPC;
1472
1473 dbg_msg("IN\n");
1474
1475 xlr_inc_counter(NETIF_STACK_TX);
1476
1477retry:
1478 ret = mac_xmit(m, sc, priv, len, tx_desc);
1479
1480 if (ret == MAC_TX_RETRY)
1481 goto retry;
1482
1483 dbg_msg("OUT, ret = %d\n", ret);
1484 if (ret == MAC_TX_FAIL) {
1485 /* FULL */
1486 dbg_msg("Msg Ring Full. Stopping upper layer Q\n");
1487 port_inc_counter(priv->instance, PORT_STOPQ);
1488 }
1489 return ret;
1490}
1491
/*
 * Replenish the MACs' free-in (rx buffer) rings: for every active
 * MAC, push one fresh receive buffer per pending request counted in
 * priv->frin_to_be_sent[cpu], looping until all MACs are drained.
 */
static void
mac_frin_replenish(void *args /* ignored */ )
{
	int cpu = xlr_core_id();
	int done = 0;
	int i = 0;

	xlr_inc_counter(REPLENISH_ENTER);
	/*
	 * xlr_set_counter(REPLENISH_ENTER_COUNT,
	 * atomic_read(frin_to_be_sent));
	 */
	xlr_set_counter(REPLENISH_CPU, PCPU_GET(cpuid));

	for (;;) {

		done = 0;

		for (i = 0; i < XLR_MAX_MACS; i++) {
			/* int offset = 0; */
			void *m;
			uint32_t cycles;
			struct rge_softc *sc;
			struct driver_data *priv;
			int frin_to_be_sent;

			sc = dev_mac[i];
			if (!sc)
				goto skip;

			priv = &(sc->priv);
			frin_to_be_sent = priv->frin_to_be_sent[cpu];

			/* if (atomic_read(frin_to_be_sent) < 0) */
			if (frin_to_be_sent < 0) {
				panic("BUG?: [%s]: gmac_%d illegal value for frin_to_be_sent=%d\n",
				    __FUNCTION__, i,
				    frin_to_be_sent);
			}
			/* if (!atomic_read(frin_to_be_sent)) */
			if (!frin_to_be_sent)
				goto skip;

			cycles = mips_rd_count();
			{
				m = get_buf();
				if (!m) {
					device_printf(sc->rge_dev, "No buffer\n");
					goto skip;
				}
			}
			xlr_inc_counter(REPLENISH_FRIN);
			/* Hand the buffer's physical address to the MAC. */
			if (xlr_mac_send_fr(priv, vtophys(m), MAX_FRAME_SIZE)) {
				free_buf(vtophys(m));
				printf("[%s]: rx free message_send failed!\n", __FUNCTION__);
				break;
			}
			xlr_set_counter(REPLENISH_CYCLES,
			    (read_c0_count() - cycles));
			atomic_subtract_int((&priv->frin_to_be_sent[cpu]), 1);

			continue;
	skip:
			done++;
		}
		/* Exit once every MAC reported nothing left to send. */
		if (done == XLR_MAX_MACS)
			break;
	}
}
1561
/* 0/1 gate ensuring only one context runs rge_tx_bkp_func() at a time. */
static volatile uint32_t g_tx_frm_tx_ok=0;
1563
1564static void
1565rge_tx_bkp_func(void *arg, int npending)
1566{
1567 int i = 0;
1568
1569 for (i = 0; i < xlr_board_info.gmacports; i++) {
1570 if (!dev_mac[i] || !dev_mac[i]->active)
1571 continue;
1572 rge_start_locked(dev_mac[i]->rge_ifp, RGE_TX_THRESHOLD);
1573 }
1574 atomic_subtract_int(&g_tx_frm_tx_ok, 1);
1575}
1576
/* This function is called from an interrupt handler */
/*
 * Message-ring callback for MAC stations.  A zero-length message is a
 * transmit-complete notification (descriptor free); otherwise it is a
 * received frame that is handed up through rge_rx().
 */
void
rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
    int stid, struct msgrng_msg *msg,
    void *data /* ignored */ )
{
	uint64_t phys_addr = 0;
	unsigned long addr = 0;
	uint32_t length = 0;
	int ctrl = 0, port = 0;
	struct rge_softc *sc = NULL;
	struct driver_data *priv = 0;
	struct ifnet *ifp;
	int vcpu = xlr_cpu_id();
	int cpu = xlr_core_id();

	dbg_msg("mac: bucket=%d, size=%d, code=%d, stid=%d, msg0=%jx msg1=%jx\n",
	    bucket, size, code, stid, (uintmax_t)msg->msg0, (uintmax_t)msg->msg1);

	/* msg0 packs the buffer physical address and a 14-bit length. */
	phys_addr = (uint64_t) (msg->msg0 & 0xffffffffe0ULL);
	length = (msg->msg0 >> 40) & 0x3fff;
	if (length == 0) {
		/* Zero length: a returned (freed) transmit descriptor. */
		ctrl = CTRL_REG_FREE;
		port = (msg->msg0 >> 54) & 0x0f;
		addr = 0;
	} else {
		/* Received frame: strip alignment offset and CRC. */
		ctrl = CTRL_SNGL;
		length = length - BYTE_OFFSET - MAC_CRC_LEN;
		port = msg->msg0 & 0x0f;
		addr = 0;
	}

	/* Map the station id + port number to the owning softc. */
	if (xlr_board_info.is_xls) {
		if (stid == MSGRNG_STNID_GMAC1)
			port += 4;
		sc = dev_mac[dev_mac_gmac0 + port];
	} else {
		if (stid == MSGRNG_STNID_XGS0FR)
			sc = dev_mac[dev_mac_xgs0];
		else if (stid == MSGRNG_STNID_XGS1FR)
			sc = dev_mac[dev_mac_xgs0 + 1];
		else
			sc = dev_mac[dev_mac_gmac0 + port];
	}
	if (sc == NULL)
		return;
	priv = &(sc->priv);

	dbg_msg("msg0 = %jx, stid = %d, port = %d, addr=%lx, length=%d, ctrl=%d\n",
	    (uintmax_t)msg->msg0, stid, port, addr, length, ctrl);

	if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
		/* Tx complete: release descriptor, restart a stalled queue. */
		xlr_rge_tx_ok_done[vcpu]++;
		release_tx_desc(msg, 1);
		ifp = sc->rge_ifp;
		if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
			rge_tx_bkp_func(NULL, 0);
		xlr_set_counter(NETIF_TX_COMPLETE_CYCLES,
		    (read_c0_count() - msgrng_msg_cycles));
	} else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
		/* Rx Packet */
		/* struct mbuf *m = 0; */
		/* int logical_cpu = 0; */

		dbg_msg("Received packet, port = %d\n", port);
		/*
		 * if num frins to be sent exceeds threshold, wake up the
		 * helper thread
		 */
		atomic_add_int(&(priv->frin_to_be_sent[cpu]), 1);
		if ((priv->frin_to_be_sent[cpu]) > MAC_FRIN_TO_BE_SENT_THRESHOLD) {
			mac_frin_replenish(NULL);
		}
		dbg_msg("gmac_%d: rx packet: phys_addr = %jx, length = %x\n",
		    priv->instance, (uintmax_t)phys_addr, length);
		mac_stats_add(priv->stats.rx_packets, 1);
		mac_stats_add(priv->stats.rx_bytes, length);
		xlr_inc_counter(NETIF_RX);
		xlr_set_counter(NETIF_RX_CYCLES,
		    (read_c0_count() - msgrng_msg_cycles));
		rge_rx(sc, phys_addr, length);
		xlr_rge_rx_done[vcpu]++;
	} else {
		printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
	}

}
1667
1668/**********************************************************************
1669 **********************************************************************/
1670static int
1671rge_probe(dev)
1672 device_t dev;
1673{
1674 device_set_desc(dev, "RMI Gigabit Ethernet");
1675
1676 /* Always return 0 */
1677 return 0;
1678}
1679
/* Debug knob: when nonzero, the callout below dumps an rx register
 * once per second.  The callout is armed from rge_attach() (currently
 * commented out there). */
volatile unsigned long xlr_debug_enabled;
struct callout rge_dbg_count;
/* Periodic debug callout: print register 0x23e of port 0 and re-arm. */
static void
xlr_debug_count(void *addr)
{
	struct driver_data *priv = &dev_mac[0]->priv;

	/* uint32_t crdt; */
	if (xlr_debug_enabled) {
		/* 0x23e: printed as "AvailRxIn"; register name not
		 * visible here -- confirm against the XLR manual. */
		printf("\nAvailRxIn %#x\n", xlr_read_reg(priv->mmio, 0x23e));
	}
	callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
}
1693
1694
1695static void
1696xlr_tx_q_wakeup(void *addr)
1697{
1698 int i = 0;
1699 int j = 0;
1700
1701 for (i = 0; i < xlr_board_info.gmacports; i++) {
1702 if (!dev_mac[i] || !dev_mac[i]->active)
1703 continue;
1704 if ((dev_mac[i]->rge_ifp->if_drv_flags) & IFF_DRV_OACTIVE) {
1705 for (j = 0; j < XLR_MAX_CORE; j++) {
1706 if (xlr_tot_avail_p2d[j]) {
1707 dev_mac[i]->rge_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1708 break;
1709 }
1710 }
1711 }
1712 }
1713 if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
1714 rge_tx_bkp_func(NULL, 0);
1715 callout_reset(&xlr_tx_stop_bkp, 5 * hz, xlr_tx_q_wakeup, NULL);
1716}
1717
1718static int
1719rge_attach(device_t dev)
1720{
1721 struct ifnet *ifp;
1722 struct rge_softc *sc;
1723 struct driver_data *priv = 0;
1724 int ret = 0;
1725 struct xlr_gmac_block_t *gmac_conf = device_get_ivars(dev);
1726
1727 sc = device_get_softc(dev);
1728 sc->rge_dev = dev;
1729
1730 /* Initialize mac's */
1731 sc->unit = device_get_unit(dev);
1732
1733 if (sc->unit > XLR_MAX_MACS) {
1734 ret = ENXIO;
1735 goto out;
1736 }
1737 RGE_LOCK_INIT(sc, device_get_nameunit(dev));
1738
1739 priv = &(sc->priv);
1740 priv->sc = sc;
1741
1742 sc->flags = 0; /* TODO : fix me up later */
1743
1744 priv->id = sc->unit;
1745 if (gmac_conf->type == XLR_GMAC) {
1746 priv->instance = priv->id;
1747 priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr +
1748 0x1000 * (sc->unit % 4));
1749 if ((ret = rmi_xlr_gmac_reset(priv)) == -1)
1750 goto out;
1751 } else if (gmac_conf->type == XLR_XGMAC) {
1752 priv->instance = priv->id - xlr_board_info.gmacports;
1753 priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1754 }
1755 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_VI ||
1756 (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI &&
1757 priv->instance >=4)) {
1758 dbg_msg("Arizona board - offset 4 \n");
1759 priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_4_OFFSET);
1760 } else
1761 priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1762
1763 priv->pcs_mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1764 priv->serdes_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1765
1766 sc->base_addr = (unsigned long)priv->mmio;
1767 sc->mem_end = (unsigned long)priv->mmio + XLR_IO_SIZE - 1;
1768
1769 sc->xmit = rge_start;
1770 sc->stop = rge_stop;
1771 sc->get_stats = rmi_xlr_mac_get_stats;
1772 sc->ioctl = rge_ioctl;
1773
1774 /* Initialize the device specific driver data */
1775 mtx_init(&priv->lock, "rge", NULL, MTX_SPIN);
1776
1777 priv->type = gmac_conf->type;
1778
1779 priv->mode = gmac_conf->mode;
1780 if (xlr_board_info.is_xls == 0) {
1781 /* TODO - check II and IIB boards */
1782 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_II &&
1783 xlr_boot1_info.board_minor_version != 1)
1784 priv->phy_addr = priv->instance - 2;
1785 else
1786 priv->phy_addr = priv->instance;
1787 priv->mode = XLR_RGMII;
1788 } else {
1789 if (gmac_conf->mode == XLR_PORT0_RGMII &&
1790 priv->instance == 0) {
1791 priv->mode = XLR_PORT0_RGMII;
1792 priv->phy_addr = 0;
1793 } else {
1794 priv->mode = XLR_SGMII;
1795 /* Board 11 has SGMII daughter cards with the XLS chips, in this case
1796 the phy number is 0-3 for both GMAC blocks */
1797 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI)
1798 priv->phy_addr = priv->instance % 4 + 16;
1799 else
1800 priv->phy_addr = priv->instance + 16;
1801 }
1802 }
1803
1804 priv->txbucket = gmac_conf->station_txbase + priv->instance % 4;
1805 priv->rfrbucket = gmac_conf->station_rfr;
1806 priv->spill_configured = 0;
1807
1808 dbg_msg("priv->mmio=%p\n", priv->mmio);
1809
1810 /* Set up ifnet structure */
1811 ifp = sc->rge_ifp = if_alloc(IFT_ETHER);
1812 if (ifp == NULL) {
1813 device_printf(sc->rge_dev, "failed to if_alloc()\n");
1814 rge_release_resources(sc);
1815 ret = ENXIO;
1816 RGE_LOCK_DESTROY(sc);
1817 goto out;
1818 }
1819 ifp->if_softc = sc;
1820 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1821 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1822 ifp->if_ioctl = rge_ioctl;
1823 ifp->if_start = rge_start;
1824 ifp->if_init = rge_init;
1825 ifp->if_mtu = ETHERMTU;
1826 ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1827 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1828 IFQ_SET_READY(&ifp->if_snd);
1829 sc->active = 1;
1830 ifp->if_hwassist = 0;
1831 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;
1832 ifp->if_capenable = ifp->if_capabilities;
1833
1834 /* Initialize the rge_softc */
1835 sc->irq = gmac_conf->baseirq + priv->instance % 4;
1836
1837 /* Set the IRQ into the rid field */
1838 /*
1839 * note this is a hack to pass the irq to the iodi interrupt setup
1840 * routines
1841 */
1842 sc->rge_irq.__r_i = (struct resource_i *)(intptr_t)sc->irq;
1843
1844 ret = bus_setup_intr(dev, &sc->rge_irq, INTR_FAST | INTR_TYPE_NET | INTR_MPSAFE,
1844 ret = bus_setup_intr(dev, &sc->rge_irq, INTR_TYPE_NET | INTR_MPSAFE,
1845 NULL, rge_intr, sc, &sc->rge_intrhand);
1846
1847 if (ret) {
1848 rge_detach(dev);
1849 device_printf(sc->rge_dev, "couldn't set up irq\n");
1850 RGE_LOCK_DESTROY(sc);
1851 goto out;
1852 }
1853 xlr_mac_get_hwaddr(sc);
1854 xlr_mac_setup_hwaddr(priv);
1855
1856 dbg_msg("MMIO %08lx, MII %08lx, PCS %08lx, base %08lx PHY %d IRQ %d\n",
1857 (u_long)priv->mmio, (u_long)priv->mii_mmio, (u_long)priv->pcs_mmio,
1858 (u_long)sc->base_addr, priv->phy_addr, sc->irq);
1859 dbg_msg("HWADDR %02x:%02x tx %d rfr %d\n", (u_int)sc->dev_addr[4],
1860 (u_int)sc->dev_addr[5], priv->txbucket, priv->rfrbucket);
1861
1862 /*
1863 * Set up ifmedia support.
1864 */
1865 /*
1866 * Initialize MII/media info.
1867 */
1868 sc->rge_mii.mii_ifp = ifp;
1869 sc->rge_mii.mii_readreg = rge_mii_read;
1870 sc->rge_mii.mii_writereg = (mii_writereg_t) rge_mii_write;
1871 sc->rge_mii.mii_statchg = rmi_xlr_mac_mii_statchg;
1872 ifmedia_init(&sc->rge_mii.mii_media, 0, rmi_xlr_mac_mediachange,
1873 rmi_xlr_mac_mediastatus);
1874 ifmedia_add(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1875 ifmedia_set(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1876 sc->rge_mii.mii_media.ifm_media = sc->rge_mii.mii_media.ifm_cur->ifm_media;
1877
1878 /*
1879 * Call MI attach routine.
1880 */
1881 ether_ifattach(ifp, sc->dev_addr);
1882
1883 if (priv->type == XLR_GMAC) {
1884 rmi_xlr_gmac_init(priv);
1885 } else if (priv->type == XLR_XGMAC) {
1886 rmi_xlr_xgmac_init(priv);
1887 }
1888 dbg_msg("rge_%d: Phoenix Mac at 0x%p (mtu=%d)\n",
1889 sc->unit, priv->mmio, sc->mtu);
1890 dev_mac[sc->unit] = sc;
1891 if (priv->type == XLR_XGMAC && priv->instance == 0)
1892 dev_mac_xgs0 = sc->unit;
1893 if (priv->type == XLR_GMAC && priv->instance == 0)
1894 dev_mac_gmac0 = sc->unit;
1895
1896 if (!gmac_common_init_done) {
1897 mac_common_init();
1898 gmac_common_init_done = 1;
1899 callout_init(&xlr_tx_stop_bkp, CALLOUT_MPSAFE);
1900 callout_reset(&xlr_tx_stop_bkp, hz, xlr_tx_q_wakeup, NULL);
1901 callout_init(&rge_dbg_count, CALLOUT_MPSAFE);
1902 //callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1903 }
1904 if ((ret = rmi_xlr_mac_open(sc)) == -1) {
1905 RGE_LOCK_DESTROY(sc);
1906 goto out;
1907 }
1908out:
1909 if (ret < 0) {
1910 device_printf(dev, "error - skipping\n");
1911 }
1912 return ret;
1913}
1914
/* Hardware reset hook: not implemented for this MAC (no-op). */
static void
rge_reset(struct rge_softc *sc)
{
}
1919
1920static int
1921rge_detach(dev)
1922 device_t dev;
1923{
1924#ifdef FREEBSD_MAC_NOT_YET
1925 struct rge_softc *sc;
1926 struct ifnet *ifp;
1927
1928 sc = device_get_softc(dev);
1929 ifp = sc->rge_ifp;
1930
1931 RGE_LOCK(sc);
1932 rge_stop(sc);
1933 rge_reset(sc);
1934 RGE_UNLOCK(sc);
1935
1936 ether_ifdetach(ifp);
1937
1938 if (sc->rge_tbi) {
1939 ifmedia_removeall(&sc->rge_ifmedia);
1940 } else {
1941 bus_generic_detach(dev);
1942 device_delete_child(dev, sc->rge_miibus);
1943 }
1944
1945 rge_release_resources(sc);
1946
1947#endif /* FREEBSD_MAC_NOT_YET */
1948 return (0);
1949}
1950static int
1951rge_suspend(device_t dev)
1952{
1953 struct rge_softc *sc;
1954
1955 sc = device_get_softc(dev);
1956 RGE_LOCK(sc);
1957 rge_stop(sc);
1958 RGE_UNLOCK(sc);
1959
1960 return 0;
1961}
1962
/* Power-management resume: not implemented; panics if ever reached. */
static int
rge_resume(device_t dev)
{
	panic("rge_resume(): unimplemented\n");
	return 0;
}
1969
/*
 * Free resources allocated by rge_attach(): the ifnet and, if it was
 * initialized, the softc mutex.
 */
static void
rge_release_resources(struct rge_softc *sc)
{

	if (sc->rge_ifp != NULL)
		if_free(sc->rge_ifp);

	if (mtx_initialized(&sc->rge_mtx))	/* XXX */
		RGE_LOCK_DESTROY(sc);
}
/* Debug rx counters; not updated anywhere in the code visible here. */
uint32_t gmac_rx_fail[32];
uint32_t gmac_rx_pass[32];
1982
/*
 * Deliver one received frame to the network stack.  The mbuf pointer
 * and a 0xf00bad ownership cookie are stored in the cache line just
 * before the packet buffer; frames without the cookie are dropped.
 */
static void
rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
{
	struct mbuf *m;
	struct ifnet *ifp = sc->rge_ifp;
	uint64_t mag;
	uint32_t sr;
	/*
	 * On 32 bit machines we use XKPHYS to get the values stores with
	 * the mbuf, need to explicitly enable KX. Disable interrupts while
	 * KX is enabled to prevent this setting leaking to other code.
	 */
	sr = xlr_enable_kx();
	m = (struct mbuf *)(intptr_t)xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
	mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
	xlr_restore_kx(sr);
	if (mag != 0xf00bad) {
		/* somebody else packet Error - FIXME in intialization */
		printf("cpu %d: *ERROR* Not my packet paddr %p\n",
		    xlr_cpu_id(), (void *)paddr);
		return;
	}
	/* align the data */
	m->m_data += BYTE_OFFSET;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = ifp;

#ifdef DUMP_PACKETS
	{
		int i = 0;
		unsigned char *buf = (char *)m->m_data;

		printf("Rx Packet: length=%d\n", len);
		for (i = 0; i < 64; i++) {
			if (i && (i % 16) == 0)
				printf("\n");
			printf("%02x ", buf[i]);
		}
		printf("\n");
	}
#endif
	ifp->if_ipackets++;
	(*ifp->if_input) (ifp, m);
}
2027
/*
 * MAC error/MDIO interrupt handler.  An MDInt means a PHY event: walk
 * every GMAC, read its PHY interrupt status and re-probe link speed.
 * Any other cause is logged as an error.  All causes are then cleared.
 */
static void
rge_intr(void *arg)
{
	struct rge_softc *sc = (struct rge_softc *)arg;
	struct driver_data *priv = &(sc->priv);
	xlr_reg_t *mmio = priv->mmio;
	uint32_t intreg = xlr_read_reg(mmio, R_INTREG);

	if (intreg & (1 << O_INTREG__MDInt)) {
		uint32_t phy_int_status = 0;
		int i = 0;

		for (i = 0; i < XLR_MAX_MACS; i++) {
			struct rge_softc *phy_dev = 0;
			struct driver_data *phy_priv = 0;

			phy_dev = dev_mac[i];
			if (phy_dev == NULL)
				continue;

			phy_priv = &phy_dev->priv;

			if (phy_priv->type == XLR_XGMAC)
				continue;

			/* PHY reg 26 read presumably acks the PHY's
			 * interrupt latch -- confirm with PHY datasheet. */
			phy_int_status = rge_mii_read_internal(phy_priv->mii_mmio,
			    phy_priv->phy_addr, 26);
			printf("rge%d: Phy addr %d, MII MMIO %lx status %x\n", phy_priv->instance,
			    (int)phy_priv->phy_addr, (u_long)phy_priv->mii_mmio, phy_int_status);
			rmi_xlr_gmac_config_speed(phy_priv);
		}
	} else {
		printf("[%s]: mac type = %d, instance %d error "
		    "interrupt: INTREG = 0x%08x\n",
		    __FUNCTION__, priv->type, priv->instance, intreg);
	}

	/* clear all interrupts and hope to make progress */
	xlr_write_reg(mmio, R_INTREG, 0xffffffff);

	/* (not yet) on A0 and B0, xgmac interrupts are routed only to xgs_1 irq */
	if ((xlr_revision() < 2) && (priv->type == XLR_XGMAC)) {
		struct rge_softc *xgs0_dev = dev_mac[dev_mac_xgs0];
		struct driver_data *xgs0_priv = &xgs0_dev->priv;
		xlr_reg_t *xgs0_mmio = xgs0_priv->mmio;
		uint32_t xgs0_intreg = xlr_read_reg(xgs0_mmio, R_INTREG);

		if (xgs0_intreg) {
			printf("[%s]: mac type = %d, instance %d error "
			    "interrupt: INTREG = 0x%08x\n",
			    __FUNCTION__, xgs0_priv->type, xgs0_priv->instance, xgs0_intreg);

			xlr_write_reg(xgs0_mmio, R_INTREG, 0xffffffff);
		}
	}
}
2084
/*
 * Drain the ifnet send queue onto the message ring, sending at most
 * one packet per p2d descriptor available on this core.  On a full
 * ring the packet is re-queued and IFF_DRV_OACTIVE is set.
 * NOTE(review): the 'threshold' argument is currently unused.
 */
static void
rge_start_locked(struct ifnet *ifp, int threshold)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	int prepend_pkt = 0;
	int i = 0;
	struct p2d_tx_desc *tx_desc = NULL;
	int cpu = xlr_core_id();
	uint32_t vcpu = xlr_cpu_id();

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	/* One iteration per descriptor available on this core. */
	for (i = 0; i < xlr_tot_avail_p2d[cpu]; i++) {
		if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			return;
		tx_desc = get_p2d_desc();
		if (!tx_desc) {
			xlr_rge_get_p2d_failed[vcpu]++;
			return;
		}
		/* Grab a packet off the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			free_p2d_desc(tx_desc);
			return;
		}
		prepend_pkt = rmi_xlr_mac_xmit(m, sc, 0, tx_desc);

		if (prepend_pkt) {
			/* Ring full: put packet back, stall the queue. */
			xlr_rge_tx_prepend[vcpu]++;
			IF_PREPEND(&ifp->if_snd, m);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			return;
		} else {
			ifp->if_opackets++;
			xlr_rge_tx_done[vcpu]++;
		}
	}
}
2126
/*
 * ifnet if_start entry point.
 * NOTE(review): despite the worker's "_locked" name, no softc lock is
 * taken here -- confirm the intended locking contract.
 */
static void
rge_start(struct ifnet *ifp)
{
	rge_start_locked(ifp, RGE_TX_Q_SIZE);
}
2132
2133static int
2134rge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2135{
2136 struct rge_softc *sc = ifp->if_softc;
2137 struct ifreq *ifr = (struct ifreq *)data;
2138 int mask, error = 0;
2139
2140 /* struct mii_data *mii; */
2141 switch (command) {
2142 case SIOCSIFMTU:
2143 ifp->if_mtu = ifr->ifr_mtu;
2144 error = rmi_xlr_mac_change_mtu(sc, ifr->ifr_mtu);
2145 break;
2146 case SIOCSIFFLAGS:
2147
2148 RGE_LOCK(sc);
2149 if (ifp->if_flags & IFF_UP) {
2150 /*
2151 * If only the state of the PROMISC flag changed,
2152 * then just use the 'set promisc mode' command
2153 * instead of reinitializing the entire NIC. Doing a
2154 * full re-init means reloading the firmware and
2155 * waiting for it to start up, which may take a
2156 * second or two. Similarly for ALLMULTI.
2157 */
2158 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2159 ifp->if_flags & IFF_PROMISC &&
2160 !(sc->flags & IFF_PROMISC)) {
2161 sc->flags |= IFF_PROMISC;
2162 xlr_mac_set_rx_mode(sc);
2163 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2164 !(ifp->if_flags & IFF_PROMISC) &&
2165 sc->flags & IFF_PROMISC) {
2166 sc->flags &= IFF_PROMISC;
2167 xlr_mac_set_rx_mode(sc);
2168 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2169 (ifp->if_flags ^ sc->flags) & IFF_ALLMULTI) {
2170 rmi_xlr_mac_set_multicast_list(sc);
2171 } else
2172 xlr_mac_set_rx_mode(sc);
2173 } else {
2174 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2175 xlr_mac_set_rx_mode(sc);
2176 }
2177 }
2178 sc->flags = ifp->if_flags;
2179 RGE_UNLOCK(sc);
2180 error = 0;
2181 break;
2182 case SIOCADDMULTI:
2183 case SIOCDELMULTI:
2184 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2185 RGE_LOCK(sc);
2186 rmi_xlr_mac_set_multicast_list(sc);
2187 RGE_UNLOCK(sc);
2188 error = 0;
2189 }
2190 break;
2191 case SIOCSIFMEDIA:
2192 case SIOCGIFMEDIA:
2193 error = ifmedia_ioctl(ifp, ifr,
2194 &sc->rge_mii.mii_media, command);
2195 break;
2196 case SIOCSIFCAP:
2197 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2198 ifp->if_hwassist = 0;
2199 break;
2200 default:
2201 error = ether_ioctl(ifp, command, data);
2202 break;
2203 }
2204
2205 return (error);
2206}
2207
2208static void
2209rge_init(void *addr)
2210{
2211 struct rge_softc *sc = (struct rge_softc *)addr;
2212 struct ifnet *ifp;
2213 struct driver_data *priv = &(sc->priv);
2214
2215 ifp = sc->rge_ifp;
2216
2217 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2218 return;
2219 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2220 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2221
2222 rmi_xlr_mac_set_enable(priv, 1);
2223}
2224
/* Stop the interface: delegates to rmi_xlr_mac_close(). */
static void
rge_stop(struct rge_softc *sc)
{
	rmi_xlr_mac_close(sc);
}
2230
2231static int
2232rge_shutdown(device_t dev)
2233{
2234 struct rge_softc *sc;
2235
2236 sc = device_get_softc(dev);
2237
2238 RGE_LOCK(sc);
2239 rge_stop(sc);
2240 rge_reset(sc);
2241 RGE_UNLOCK(sc);
2242
2243 return (0);
2244}
2245
/*
 * Bring the MAC up: refill the free-in descriptor ring, program the
 * rx filter, and -- once the last GMAC port opens -- set each port's
 * interrupt mask (MDInt unmasked only on instance 0).  Returns 0 on
 * success, -1 if the rx ring cannot be filled.
 */
static int
rmi_xlr_mac_open(struct rge_softc *sc)
{
	struct driver_data *priv = &(sc->priv);
	int i;

	dbg_msg("IN\n");

	if (rmi_xlr_mac_fill_rxfr(sc)) {
		return -1;
	}
	mtx_lock_spin(&priv->lock);

	xlr_mac_set_rx_mode(sc);

	if (sc->unit == xlr_board_info.gmacports - 1) {
		printf("Enabling MDIO interrupts\n");
		struct rge_softc *tmp = NULL;

		for (i = 0; i < xlr_board_info.gmacports; i++) {
			tmp = dev_mac[i];
			if (tmp)
				/* Only instance 0 gets MDInt unmasked. */
				xlr_write_reg(tmp->priv.mmio, R_INTMASK,
				    ((tmp->priv.instance == 0) << O_INTMASK__MDInt));
		}
	}
	/*
	 * Configure the speed, duplex, and flow control
	 */
	rmi_xlr_mac_set_speed(priv, priv->speed);
	rmi_xlr_mac_set_duplex(priv, priv->duplex, priv->flow_ctrl);
	/* NOTE(review): MAC left disabled here; rge_init() enables it. */
	rmi_xlr_mac_set_enable(priv, 0);

	mtx_unlock_spin(&priv->lock);

	/* Reset per-core replenish counts; 8 assumed max cores -- confirm. */
	for (i = 0; i < 8; i++) {
		priv->frin_to_be_sent[i] = 0;
	}

	return 0;
}
2287
2288/**********************************************************************
2289 **********************************************************************/
/*
 * Quiesce the MAC for close: disable TX/RX under the spin lock and
 * bump the stop-queue counters.  Always returns 0.
 */
static int
rmi_xlr_mac_close(struct rge_softc *sc)
{
	struct driver_data *priv = &(sc->priv);

	mtx_lock_spin(&priv->lock);

	/*
	 * There may have left over mbufs in the ring as well as in free in
	 * they will be reused next time open is called
	 */

	rmi_xlr_mac_set_enable(priv, 0);

	xlr_inc_counter(NETIF_STOP_Q);
	port_inc_counter(priv->instance, PORT_STOPQ);

	mtx_unlock_spin(&priv->lock);

	return 0;
}
2311
2312/**********************************************************************
2313 **********************************************************************/
/*
 * Return the driver's statistics block.  The lock/unlock pair is a
 * placeholder for statistics that must be refreshed under the lock.
 */
static struct rge_softc_stats *
rmi_xlr_mac_get_stats(struct rge_softc *sc)
{
	struct driver_data *priv = &(sc->priv);

	/* unsigned long flags; */

	mtx_lock_spin(&priv->lock);

	/* XXX update other stats here */

	mtx_unlock_spin(&priv->lock);

	return &priv->stats;
}
2329
2330/**********************************************************************
2331 **********************************************************************/
/* Multicast filter programming: not implemented (intentional no-op). */
static void
rmi_xlr_mac_set_multicast_list(struct rge_softc *sc)
{
}
2336
2337/**********************************************************************
2338 **********************************************************************/
2339static int
2340rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu)
2341{
2342 struct driver_data *priv = &(sc->priv);
2343
2344 if ((new_mtu > 9500) || (new_mtu < 64)) {
2345 return -EINVAL;
2346 }
2347 mtx_lock_spin(&priv->lock);
2348
2349 sc->mtu = new_mtu;
2350
2351 /* Disable MAC TX/RX */
2352 rmi_xlr_mac_set_enable(priv, 0);
2353
2354 /* Flush RX FR IN */
2355 /* Flush TX IN */
2356 rmi_xlr_mac_set_enable(priv, 1);
2357
2358 mtx_unlock_spin(&priv->lock);
2359 return 0;
2360}
2361
2362/**********************************************************************
2363 **********************************************************************/
2364static int
2365rmi_xlr_mac_fill_rxfr(struct rge_softc *sc)
2366{
2367 struct driver_data *priv = &(sc->priv);
2368 int i;
2369 int ret = 0;
2370 void *ptr;
2371
2372 dbg_msg("\n");
2373 if (!priv->init_frin_desc)
2374 return ret;
2375 priv->init_frin_desc = 0;
2376
2377 dbg_msg("\n");
2378 for (i = 0; i < MAX_NUM_DESC; i++) {
2379 ptr = get_buf();
2380 if (!ptr) {
2381 ret = -ENOMEM;
2382 break;
2383 }
2384 /* Send the free Rx desc to the MAC */
2385 xlr_mac_send_fr(priv, vtophys(ptr), MAX_FRAME_SIZE);
2386 }
2387
2388 return ret;
2389}
2390
2391/**********************************************************************
2392 **********************************************************************/
/*
 * Allocate one cacheline-aligned spill area of 'size' bytes and
 * program its physical address and size into the given MAC
 * registers.  The physical address is split across two registers:
 * reg_start_0 takes bits 5..36 (the area is cacheline aligned, so
 * bits 0..4 are zero) and reg_start_1 takes bits 37..39.
 *
 * Returns the kernel virtual address of the area; panics on
 * allocation failure, as the driver cannot operate without it.
 */
static __inline__ void *
rmi_xlr_config_spill(xlr_reg_t * mmio,
    int reg_start_0, int reg_start_1,
    int reg_size, int size)
{
	uint32_t spill_size = size;
	void *spill = NULL;
	uint64_t phys_addr = 0;


	/* M_ZERO: start the spill area clean. */
	spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
	if (!spill || ((vm_offset_t)spill & (XLR_CACHELINE_SIZE - 1))) {
		panic("Unable to allocate memory for spill area!\n");
	}
	phys_addr = vtophys(spill);
	dbg_msg("Allocate spill %d bytes at %jx\n", size, (uintmax_t)phys_addr);
	xlr_write_reg(mmio, reg_start_0, (phys_addr >> 5) & 0xffffffff);
	xlr_write_reg(mmio, reg_start_1, (phys_addr >> 37) & 0x07);
	xlr_write_reg(mmio, reg_size, spill_size);

	return spill;
}
2416
2417static void
2418rmi_xlr_config_spill_area(struct driver_data *priv)
2419{
2420 /*
2421 * if driver initialization is done parallely on multiple cpus
2422 * spill_configured needs synchronization
2423 */
2424 if (priv->spill_configured)
2425 return;
2426
2427 if (priv->type == XLR_GMAC && priv->instance % 4 != 0) {
2428 priv->spill_configured = 1;
2429 return;
2430 }
2431 priv->spill_configured = 1;
2432
2433 priv->frin_spill =
2434 rmi_xlr_config_spill(priv->mmio,
2435 R_REG_FRIN_SPILL_MEM_START_0,
2436 R_REG_FRIN_SPILL_MEM_START_1,
2437 R_REG_FRIN_SPILL_MEM_SIZE,
2438 MAX_FRIN_SPILL *
2439 sizeof(struct fr_desc));
2440
2441 priv->class_0_spill =
2442 rmi_xlr_config_spill(priv->mmio,
2443 R_CLASS0_SPILL_MEM_START_0,
2444 R_CLASS0_SPILL_MEM_START_1,
2445 R_CLASS0_SPILL_MEM_SIZE,
2446 MAX_CLASS_0_SPILL *
2447 sizeof(union rx_tx_desc));
2448 priv->class_1_spill =
2449 rmi_xlr_config_spill(priv->mmio,
2450 R_CLASS1_SPILL_MEM_START_0,
2451 R_CLASS1_SPILL_MEM_START_1,
2452 R_CLASS1_SPILL_MEM_SIZE,
2453 MAX_CLASS_1_SPILL *
2454 sizeof(union rx_tx_desc));
2455
2456 priv->frout_spill =
2457 rmi_xlr_config_spill(priv->mmio, R_FROUT_SPILL_MEM_START_0,
2458 R_FROUT_SPILL_MEM_START_1,
2459 R_FROUT_SPILL_MEM_SIZE,
2460 MAX_FROUT_SPILL *
2461 sizeof(struct fr_desc));
2462
2463 priv->class_2_spill =
2464 rmi_xlr_config_spill(priv->mmio,
2465 R_CLASS2_SPILL_MEM_START_0,
2466 R_CLASS2_SPILL_MEM_START_1,
2467 R_CLASS2_SPILL_MEM_SIZE,
2468 MAX_CLASS_2_SPILL *
2469 sizeof(union rx_tx_desc));
2470 priv->class_3_spill =
2471 rmi_xlr_config_spill(priv->mmio,
2472 R_CLASS3_SPILL_MEM_START_0,
2473 R_CLASS3_SPILL_MEM_START_1,
2474 R_CLASS3_SPILL_MEM_SIZE,
2475 MAX_CLASS_3_SPILL *
2476 sizeof(union rx_tx_desc));
2477 priv->spill_configured = 1;
2478}
2479
2480/*****************************************************************
2481 * Write the MAC address to the XLR registers
2482 * All 4 addresses are the same for now
2483 *****************************************************************/
/*
 * Program the station MAC address and RX filter.  The 6-byte
 * address goes into MAC_ADDR0: low word holds bytes 5..2, high
 * word bytes 1..0.  Address masks 2 and 3 are set to all-ones,
 * and the filter accepts broadcast, all multicast, and unicast
 * frames matching address 0.
 */
static void
xlr_mac_setup_hwaddr(struct driver_data *priv)
{
	struct rge_softc *sc = priv->sc;

	xlr_write_reg(priv->mmio, R_MAC_ADDR0,
	    ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16)
	    | (sc->dev_addr[3] << 8) | (sc->dev_addr[2]))
	    );

	xlr_write_reg(priv->mmio, R_MAC_ADDR0 + 1,
	    ((sc->dev_addr[1] << 24) | (sc->
	    dev_addr[0] << 16)));

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2 + 1, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3 + 1, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG,
	    (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
	    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
	    (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)
	    );
}
2512
2513/*****************************************************************
2514 * Read the MAC address from the XLR registers
2515 * All 4 addresses are the same for now
2516 *****************************************************************/
2517static void
2518xlr_mac_get_hwaddr(struct rge_softc *sc)
2519{
2520 struct driver_data *priv = &(sc->priv);
2521
2522 sc->dev_addr[0] = (xlr_boot1_info.mac_addr >> 40) & 0xff;
2523 sc->dev_addr[1] = (xlr_boot1_info.mac_addr >> 32) & 0xff;
2524 sc->dev_addr[2] = (xlr_boot1_info.mac_addr >> 24) & 0xff;
2525 sc->dev_addr[3] = (xlr_boot1_info.mac_addr >> 16) & 0xff;
2526 sc->dev_addr[4] = (xlr_boot1_info.mac_addr >> 8) & 0xff;
2527 sc->dev_addr[5] = ((xlr_boot1_info.mac_addr >> 0) & 0xff) + priv->instance;
2528}
2529
2530/*****************************************************************
2531 * Mac Module Initialization
2532 *****************************************************************/
/*
 * One-time driver-wide initialization: set up the p2d descriptor
 * pool and TX ring, then register the message-ring handler for
 * the GMAC station IDs.  XLS parts have a second GMAC block
 * (GMAC1) needing its own registration.
 */
static void
mac_common_init(void)
{
	init_p2d_allocation();
	init_tx_ring();

	if (xlr_board_info.is_xls) {
		if (register_msgring_handler(MSGRNG_STNID_GMAC,
		    MSGRNG_STNID_GMAC + 1, rmi_xlr_mac_msgring_handler,
		    NULL)) {
			panic("Couldn't register msgring handler\n");
		}
		if (register_msgring_handler(MSGRNG_STNID_GMAC1,
		    MSGRNG_STNID_GMAC1 + 1, rmi_xlr_mac_msgring_handler,
		    NULL)) {
			panic("Couldn't register msgring handler\n");
		}
	} else {
		if (register_msgring_handler(MSGRNG_STNID_GMAC,
		    MSGRNG_STNID_GMAC + 1, rmi_xlr_mac_msgring_handler,
		    NULL)) {
			panic("Couldn't register msgring handler\n");
		}
	}

	/*
	 * Not yet if (xlr_board_atx_ii()) { if (register_msgring_handler
	 * (TX_STN_XGS_0, rmi_xlr_mac_msgring_handler, NULL)) {
	 * panic("Couldn't register msgring handler for TX_STN_XGS_0\n"); }
	 * if (register_msgring_handler (TX_STN_XGS_1,
	 * rmi_xlr_mac_msgring_handler, NULL)) { panic("Couldn't register
	 * msgring handler for TX_STN_XGS_1\n"); } }
	 */
}
1845 NULL, rge_intr, sc, &sc->rge_intrhand);
1846
1847 if (ret) {
1848 rge_detach(dev);
1849 device_printf(sc->rge_dev, "couldn't set up irq\n");
1850 RGE_LOCK_DESTROY(sc);
1851 goto out;
1852 }
1853 xlr_mac_get_hwaddr(sc);
1854 xlr_mac_setup_hwaddr(priv);
1855
1856 dbg_msg("MMIO %08lx, MII %08lx, PCS %08lx, base %08lx PHY %d IRQ %d\n",
1857 (u_long)priv->mmio, (u_long)priv->mii_mmio, (u_long)priv->pcs_mmio,
1858 (u_long)sc->base_addr, priv->phy_addr, sc->irq);
1859 dbg_msg("HWADDR %02x:%02x tx %d rfr %d\n", (u_int)sc->dev_addr[4],
1860 (u_int)sc->dev_addr[5], priv->txbucket, priv->rfrbucket);
1861
1862 /*
1863 * Set up ifmedia support.
1864 */
1865 /*
1866 * Initialize MII/media info.
1867 */
1868 sc->rge_mii.mii_ifp = ifp;
1869 sc->rge_mii.mii_readreg = rge_mii_read;
1870 sc->rge_mii.mii_writereg = (mii_writereg_t) rge_mii_write;
1871 sc->rge_mii.mii_statchg = rmi_xlr_mac_mii_statchg;
1872 ifmedia_init(&sc->rge_mii.mii_media, 0, rmi_xlr_mac_mediachange,
1873 rmi_xlr_mac_mediastatus);
1874 ifmedia_add(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1875 ifmedia_set(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1876 sc->rge_mii.mii_media.ifm_media = sc->rge_mii.mii_media.ifm_cur->ifm_media;
1877
1878 /*
1879 * Call MI attach routine.
1880 */
1881 ether_ifattach(ifp, sc->dev_addr);
1882
1883 if (priv->type == XLR_GMAC) {
1884 rmi_xlr_gmac_init(priv);
1885 } else if (priv->type == XLR_XGMAC) {
1886 rmi_xlr_xgmac_init(priv);
1887 }
1888 dbg_msg("rge_%d: Phoenix Mac at 0x%p (mtu=%d)\n",
1889 sc->unit, priv->mmio, sc->mtu);
1890 dev_mac[sc->unit] = sc;
1891 if (priv->type == XLR_XGMAC && priv->instance == 0)
1892 dev_mac_xgs0 = sc->unit;
1893 if (priv->type == XLR_GMAC && priv->instance == 0)
1894 dev_mac_gmac0 = sc->unit;
1895
1896 if (!gmac_common_init_done) {
1897 mac_common_init();
1898 gmac_common_init_done = 1;
1899 callout_init(&xlr_tx_stop_bkp, CALLOUT_MPSAFE);
1900 callout_reset(&xlr_tx_stop_bkp, hz, xlr_tx_q_wakeup, NULL);
1901 callout_init(&rge_dbg_count, CALLOUT_MPSAFE);
1902 //callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1903 }
1904 if ((ret = rmi_xlr_mac_open(sc)) == -1) {
1905 RGE_LOCK_DESTROY(sc);
1906 goto out;
1907 }
1908out:
1909 if (ret < 0) {
1910 device_printf(dev, "error - skipping\n");
1911 }
1912 return ret;
1913}
1914
/*
 * Reset the hardware.  Currently a no-op placeholder.
 */
static void
rge_reset(struct rge_softc *sc)
{
}
1919
1920static int
1921rge_detach(dev)
1922 device_t dev;
1923{
1924#ifdef FREEBSD_MAC_NOT_YET
1925 struct rge_softc *sc;
1926 struct ifnet *ifp;
1927
1928 sc = device_get_softc(dev);
1929 ifp = sc->rge_ifp;
1930
1931 RGE_LOCK(sc);
1932 rge_stop(sc);
1933 rge_reset(sc);
1934 RGE_UNLOCK(sc);
1935
1936 ether_ifdetach(ifp);
1937
1938 if (sc->rge_tbi) {
1939 ifmedia_removeall(&sc->rge_ifmedia);
1940 } else {
1941 bus_generic_detach(dev);
1942 device_delete_child(dev, sc->rge_miibus);
1943 }
1944
1945 rge_release_resources(sc);
1946
1947#endif /* FREEBSD_MAC_NOT_YET */
1948 return (0);
1949}
1950static int
1951rge_suspend(device_t dev)
1952{
1953 struct rge_softc *sc;
1954
1955 sc = device_get_softc(dev);
1956 RGE_LOCK(sc);
1957 rge_stop(sc);
1958 RGE_UNLOCK(sc);
1959
1960 return 0;
1961}
1962
/*
 * Resume after suspend.  Not implemented: reaching this is a
 * hard error.
 */
static int
rge_resume(device_t dev)
{
	panic("rge_resume(): unimplemented\n");
	return 0;
}
1969
/*
 * Free software resources hanging off the softc: the ifnet and
 * the driver mutex.  Each is checked first, so this is safe to
 * call on a partially attached device.
 */
static void
rge_release_resources(struct rge_softc *sc)
{

	if (sc->rge_ifp != NULL)
		if_free(sc->rge_ifp);

	if (mtx_initialized(&sc->rge_mtx))	/* XXX */
		RGE_LOCK_DESTROY(sc);
}
/*
 * Receive diagnostics counters, indexed 0..31 (presumably one
 * slot per hardware thread -- confirm against users).
 */
uint32_t gmac_rx_fail[32];
uint32_t gmac_rx_pass[32];
1982
/*
 * Receive one packet given the physical address and length that
 * the MAC reported.  The mbuf pointer and a magic cookie live in
 * the cacheline just before the packet data (presumably stashed
 * there when the buffer was allocated by get_buf() -- confirm);
 * they are read back via the physical address and the mbuf is
 * handed up the stack.
 */
static void
rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
{
	struct mbuf *m;
	struct ifnet *ifp = sc->rge_ifp;
	uint64_t mag;
	uint32_t sr;
	/*
	 * On 32 bit machines we use XKPHYS to get the values stores with
	 * the mbuf, need to explicitly enable KX. Disable interrupts while
	 * KX is enabled to prevent this setting leaking to other code.
	 */
	sr = xlr_enable_kx();
	m = (struct mbuf *)(intptr_t)xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
	mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
	xlr_restore_kx(sr);
	/* A wrong cookie means the buffer is not one of ours: drop. */
	if (mag != 0xf00bad) {
		/* somebody else packet Error - FIXME in intialization */
		printf("cpu %d: *ERROR* Not my packet paddr %p\n",
		    xlr_cpu_id(), (void *)paddr);
		return;
	}
	/* align the data */
	m->m_data += BYTE_OFFSET;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = ifp;

#ifdef DUMP_PACKETS
	{
		int i = 0;
		unsigned char *buf = (char *)m->m_data;

		printf("Rx Packet: length=%d\n", len);
		for (i = 0; i < 64; i++) {
			if (i && (i % 16) == 0)
				printf("\n");
			printf("%02x ", buf[i]);
		}
		printf("\n");
	}
#endif
	/* Hand the packet to the network stack. */
	ifp->if_ipackets++;
	(*ifp->if_input) (ifp, m);
}
2027
/*
 * MAC interrupt handler.  The only expected source is MDInt (a
 * PHY event on the MDIO bus); on it, every GMAC port's PHY is
 * polled and its link speed reconfigured.  Any other cause is
 * logged as an error.  All pending bits are then acknowledged by
 * writing INTREG.
 */
static void
rge_intr(void *arg)
{
	struct rge_softc *sc = (struct rge_softc *)arg;
	struct driver_data *priv = &(sc->priv);
	xlr_reg_t *mmio = priv->mmio;
	uint32_t intreg = xlr_read_reg(mmio, R_INTREG);

	if (intreg & (1 << O_INTREG__MDInt)) {
		uint32_t phy_int_status = 0;
		int i = 0;

		for (i = 0; i < XLR_MAX_MACS; i++) {
			struct rge_softc *phy_dev = 0;
			struct driver_data *phy_priv = 0;

			phy_dev = dev_mac[i];
			if (phy_dev == NULL)
				continue;

			phy_priv = &phy_dev->priv;

			/* XGMAC ports have no MDIO PHY to poll. */
			if (phy_priv->type == XLR_XGMAC)
				continue;

			/*
			 * PHY register 26 is presumably the PHY's
			 * interrupt status register -- confirm
			 * against the PHY datasheet.
			 */
			phy_int_status = rge_mii_read_internal(phy_priv->mii_mmio,
			    phy_priv->phy_addr, 26);
			printf("rge%d: Phy addr %d, MII MMIO %lx status %x\n", phy_priv->instance,
			    (int)phy_priv->phy_addr, (u_long)phy_priv->mii_mmio, phy_int_status);
			rmi_xlr_gmac_config_speed(phy_priv);
		}
	} else {
		printf("[%s]: mac type = %d, instance %d error "
		    "interrupt: INTREG = 0x%08x\n",
		    __FUNCTION__, priv->type, priv->instance, intreg);
	}

	/* clear all interrupts and hope to make progress */
	xlr_write_reg(mmio, R_INTREG, 0xffffffff);

	/* (not yet) on A0 and B0, xgmac interrupts are routed only to xgs_1 irq */
	if ((xlr_revision() < 2) && (priv->type == XLR_XGMAC)) {
		struct rge_softc *xgs0_dev = dev_mac[dev_mac_xgs0];
		struct driver_data *xgs0_priv = &xgs0_dev->priv;
		xlr_reg_t *xgs0_mmio = xgs0_priv->mmio;
		uint32_t xgs0_intreg = xlr_read_reg(xgs0_mmio, R_INTREG);

		if (xgs0_intreg) {
			printf("[%s]: mac type = %d, instance %d error "
			    "interrupt: INTREG = 0x%08x\n",
			    __FUNCTION__, xgs0_priv->type, xgs0_priv->instance, xgs0_intreg);

			xlr_write_reg(xgs0_mmio, R_INTREG, 0xffffffff);
		}
	}
}
2084
2085static void
2086rge_start_locked(struct ifnet *ifp, int threshold)
2087{
2088 struct rge_softc *sc = ifp->if_softc;
2089 struct mbuf *m = NULL;
2090 int prepend_pkt = 0;
2091 int i = 0;
2092 struct p2d_tx_desc *tx_desc = NULL;
2093 int cpu = xlr_core_id();
2094 uint32_t vcpu = xlr_cpu_id();
2095
2096 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2097 return;
2098
2099 for (i = 0; i < xlr_tot_avail_p2d[cpu]; i++) {
2100 if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2101 return;
2102 tx_desc = get_p2d_desc();
2103 if (!tx_desc) {
2104 xlr_rge_get_p2d_failed[vcpu]++;
2105 return;
2106 }
2107 /* Grab a packet off the queue. */
2108 IFQ_DEQUEUE(&ifp->if_snd, m);
2109 if (m == NULL) {
2110 free_p2d_desc(tx_desc);
2111 return;
2112 }
2113 prepend_pkt = rmi_xlr_mac_xmit(m, sc, 0, tx_desc);
2114
2115 if (prepend_pkt) {
2116 xlr_rge_tx_prepend[vcpu]++;
2117 IF_PREPEND(&ifp->if_snd, m);
2118 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2119 return;
2120 } else {
2121 ifp->if_opackets++;
2122 xlr_rge_tx_done[vcpu]++;
2123 }
2124 }
2125}
2126
/*
 * if_start handler: drain the interface send queue.
 */
static void
rge_start(struct ifnet *ifp)
{
	rge_start_locked(ifp, RGE_TX_Q_SIZE);
}
2132
2133static int
2134rge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2135{
2136 struct rge_softc *sc = ifp->if_softc;
2137 struct ifreq *ifr = (struct ifreq *)data;
2138 int mask, error = 0;
2139
2140 /* struct mii_data *mii; */
2141 switch (command) {
2142 case SIOCSIFMTU:
2143 ifp->if_mtu = ifr->ifr_mtu;
2144 error = rmi_xlr_mac_change_mtu(sc, ifr->ifr_mtu);
2145 break;
2146 case SIOCSIFFLAGS:
2147
2148 RGE_LOCK(sc);
2149 if (ifp->if_flags & IFF_UP) {
2150 /*
2151 * If only the state of the PROMISC flag changed,
2152 * then just use the 'set promisc mode' command
2153 * instead of reinitializing the entire NIC. Doing a
2154 * full re-init means reloading the firmware and
2155 * waiting for it to start up, which may take a
2156 * second or two. Similarly for ALLMULTI.
2157 */
2158 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2159 ifp->if_flags & IFF_PROMISC &&
2160 !(sc->flags & IFF_PROMISC)) {
2161 sc->flags |= IFF_PROMISC;
2162 xlr_mac_set_rx_mode(sc);
2163 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2164 !(ifp->if_flags & IFF_PROMISC) &&
2165 sc->flags & IFF_PROMISC) {
2166 sc->flags &= IFF_PROMISC;
2167 xlr_mac_set_rx_mode(sc);
2168 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2169 (ifp->if_flags ^ sc->flags) & IFF_ALLMULTI) {
2170 rmi_xlr_mac_set_multicast_list(sc);
2171 } else
2172 xlr_mac_set_rx_mode(sc);
2173 } else {
2174 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2175 xlr_mac_set_rx_mode(sc);
2176 }
2177 }
2178 sc->flags = ifp->if_flags;
2179 RGE_UNLOCK(sc);
2180 error = 0;
2181 break;
2182 case SIOCADDMULTI:
2183 case SIOCDELMULTI:
2184 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2185 RGE_LOCK(sc);
2186 rmi_xlr_mac_set_multicast_list(sc);
2187 RGE_UNLOCK(sc);
2188 error = 0;
2189 }
2190 break;
2191 case SIOCSIFMEDIA:
2192 case SIOCGIFMEDIA:
2193 error = ifmedia_ioctl(ifp, ifr,
2194 &sc->rge_mii.mii_media, command);
2195 break;
2196 case SIOCSIFCAP:
2197 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2198 ifp->if_hwassist = 0;
2199 break;
2200 default:
2201 error = ether_ioctl(ifp, command, data);
2202 break;
2203 }
2204
2205 return (error);
2206}
2207
2208static void
2209rge_init(void *addr)
2210{
2211 struct rge_softc *sc = (struct rge_softc *)addr;
2212 struct ifnet *ifp;
2213 struct driver_data *priv = &(sc->priv);
2214
2215 ifp = sc->rge_ifp;
2216
2217 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2218 return;
2219 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2220 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2221
2222 rmi_xlr_mac_set_enable(priv, 1);
2223}
2224
/*
 * Stop the interface: disables the MAC TX/RX via
 * rmi_xlr_mac_close().
 */
static void
rge_stop(struct rge_softc *sc)
{
	rmi_xlr_mac_close(sc);
}
2230
2231static int
2232rge_shutdown(device_t dev)
2233{
2234 struct rge_softc *sc;
2235
2236 sc = device_get_softc(dev);
2237
2238 RGE_LOCK(sc);
2239 rge_stop(sc);
2240 rge_reset(sc);
2241 RGE_UNLOCK(sc);
2242
2243 return (0);
2244}
2245
/*
 * Bring a port up: seed the MAC with free receive buffers, set
 * the RX filter, unmask MDIO interrupts once all GMAC ports are
 * open, and configure speed/duplex/flow control.
 *
 * Returns 0 on success, -1 if receive buffers could not be
 * allocated.
 */
static int
rmi_xlr_mac_open(struct rge_softc *sc)
{
	struct driver_data *priv = &(sc->priv);
	int i;

	dbg_msg("IN\n");

	if (rmi_xlr_mac_fill_rxfr(sc)) {
		return -1;
	}
	mtx_lock_spin(&priv->lock);

	xlr_mac_set_rx_mode(sc);

	/*
	 * Once the last GMAC port opens, program INTMASK on every
	 * port: MDInt is unmasked on instance 0 only; all other
	 * ports get a zero mask.
	 */
	if (sc->unit == xlr_board_info.gmacports - 1) {
		printf("Enabling MDIO interrupts\n");
		struct rge_softc *tmp = NULL;

		for (i = 0; i < xlr_board_info.gmacports; i++) {
			tmp = dev_mac[i];
			if (tmp)
				xlr_write_reg(tmp->priv.mmio, R_INTMASK,
				    ((tmp->priv.instance == 0) << O_INTMASK__MDInt));
		}
	}
	/*
	 * Configure the speed, duplex, and flow control
	 */
	rmi_xlr_mac_set_speed(priv, priv->speed);
	rmi_xlr_mac_set_duplex(priv, priv->duplex, priv->flow_ctrl);
	/*
	 * NOTE(review): the MAC is left disabled here; presumably
	 * rge_init() enables it when the interface goes up -- confirm.
	 */
	rmi_xlr_mac_set_enable(priv, 0);

	mtx_unlock_spin(&priv->lock);

	/* Reset the per-bucket free-in backlog counters. */
	for (i = 0; i < 8; i++) {
		priv->frin_to_be_sent[i] = 0;
	}

	return 0;
}
2287
2288/**********************************************************************
2289 **********************************************************************/
2290static int
2291rmi_xlr_mac_close(struct rge_softc *sc)
2292{
2293 struct driver_data *priv = &(sc->priv);
2294
2295 mtx_lock_spin(&priv->lock);
2296
2297 /*
2298 * There may have left over mbufs in the ring as well as in free in
2299 * they will be reused next time open is called
2300 */
2301
2302 rmi_xlr_mac_set_enable(priv, 0);
2303
2304 xlr_inc_counter(NETIF_STOP_Q);
2305 port_inc_counter(priv->instance, PORT_STOPQ);
2306
2307 mtx_unlock_spin(&priv->lock);
2308
2309 return 0;
2310}
2311
2312/**********************************************************************
2313 **********************************************************************/
2314static struct rge_softc_stats *
2315rmi_xlr_mac_get_stats(struct rge_softc *sc)
2316{
2317 struct driver_data *priv = &(sc->priv);
2318
2319 /* unsigned long flags; */
2320
2321 mtx_lock_spin(&priv->lock);
2322
2323 /* XXX update other stats here */
2324
2325 mtx_unlock_spin(&priv->lock);
2326
2327 return &priv->stats;
2328}
2329
2330/**********************************************************************
2331 **********************************************************************/
/*
 * Program the hardware multicast filter.  Intentionally a no-op:
 * the RX filter is configured to accept all multicast frames (see
 * xlr_mac_setup_hwaddr()), so no per-address programming is done.
 */
static void
rmi_xlr_mac_set_multicast_list(struct rge_softc *sc)
{
}
2336
2337/**********************************************************************
2338 **********************************************************************/
2339static int
2340rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu)
2341{
2342 struct driver_data *priv = &(sc->priv);
2343
2344 if ((new_mtu > 9500) || (new_mtu < 64)) {
2345 return -EINVAL;
2346 }
2347 mtx_lock_spin(&priv->lock);
2348
2349 sc->mtu = new_mtu;
2350
2351 /* Disable MAC TX/RX */
2352 rmi_xlr_mac_set_enable(priv, 0);
2353
2354 /* Flush RX FR IN */
2355 /* Flush TX IN */
2356 rmi_xlr_mac_set_enable(priv, 1);
2357
2358 mtx_unlock_spin(&priv->lock);
2359 return 0;
2360}
2361
2362/**********************************************************************
2363 **********************************************************************/
2364static int
2365rmi_xlr_mac_fill_rxfr(struct rge_softc *sc)
2366{
2367 struct driver_data *priv = &(sc->priv);
2368 int i;
2369 int ret = 0;
2370 void *ptr;
2371
2372 dbg_msg("\n");
2373 if (!priv->init_frin_desc)
2374 return ret;
2375 priv->init_frin_desc = 0;
2376
2377 dbg_msg("\n");
2378 for (i = 0; i < MAX_NUM_DESC; i++) {
2379 ptr = get_buf();
2380 if (!ptr) {
2381 ret = -ENOMEM;
2382 break;
2383 }
2384 /* Send the free Rx desc to the MAC */
2385 xlr_mac_send_fr(priv, vtophys(ptr), MAX_FRAME_SIZE);
2386 }
2387
2388 return ret;
2389}
2390
2391/**********************************************************************
2392 **********************************************************************/
/*
 * Allocate one cacheline-aligned spill area of 'size' bytes and
 * program its physical address and size into the given MAC
 * registers.  The physical address is split across two registers:
 * reg_start_0 takes bits 5..36 (the area is cacheline aligned, so
 * bits 0..4 are zero) and reg_start_1 takes bits 37..39.
 *
 * Returns the kernel virtual address of the area; panics on
 * allocation failure, as the driver cannot operate without it.
 */
static __inline__ void *
rmi_xlr_config_spill(xlr_reg_t * mmio,
    int reg_start_0, int reg_start_1,
    int reg_size, int size)
{
	uint32_t spill_size = size;
	void *spill = NULL;
	uint64_t phys_addr = 0;


	/* M_ZERO: start the spill area clean. */
	spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
	if (!spill || ((vm_offset_t)spill & (XLR_CACHELINE_SIZE - 1))) {
		panic("Unable to allocate memory for spill area!\n");
	}
	phys_addr = vtophys(spill);
	dbg_msg("Allocate spill %d bytes at %jx\n", size, (uintmax_t)phys_addr);
	xlr_write_reg(mmio, reg_start_0, (phys_addr >> 5) & 0xffffffff);
	xlr_write_reg(mmio, reg_start_1, (phys_addr >> 37) & 0x07);
	xlr_write_reg(mmio, reg_size, spill_size);

	return spill;
}
2416
2417static void
2418rmi_xlr_config_spill_area(struct driver_data *priv)
2419{
2420 /*
2421 * if driver initialization is done parallely on multiple cpus
2422 * spill_configured needs synchronization
2423 */
2424 if (priv->spill_configured)
2425 return;
2426
2427 if (priv->type == XLR_GMAC && priv->instance % 4 != 0) {
2428 priv->spill_configured = 1;
2429 return;
2430 }
2431 priv->spill_configured = 1;
2432
2433 priv->frin_spill =
2434 rmi_xlr_config_spill(priv->mmio,
2435 R_REG_FRIN_SPILL_MEM_START_0,
2436 R_REG_FRIN_SPILL_MEM_START_1,
2437 R_REG_FRIN_SPILL_MEM_SIZE,
2438 MAX_FRIN_SPILL *
2439 sizeof(struct fr_desc));
2440
2441 priv->class_0_spill =
2442 rmi_xlr_config_spill(priv->mmio,
2443 R_CLASS0_SPILL_MEM_START_0,
2444 R_CLASS0_SPILL_MEM_START_1,
2445 R_CLASS0_SPILL_MEM_SIZE,
2446 MAX_CLASS_0_SPILL *
2447 sizeof(union rx_tx_desc));
2448 priv->class_1_spill =
2449 rmi_xlr_config_spill(priv->mmio,
2450 R_CLASS1_SPILL_MEM_START_0,
2451 R_CLASS1_SPILL_MEM_START_1,
2452 R_CLASS1_SPILL_MEM_SIZE,
2453 MAX_CLASS_1_SPILL *
2454 sizeof(union rx_tx_desc));
2455
2456 priv->frout_spill =
2457 rmi_xlr_config_spill(priv->mmio, R_FROUT_SPILL_MEM_START_0,
2458 R_FROUT_SPILL_MEM_START_1,
2459 R_FROUT_SPILL_MEM_SIZE,
2460 MAX_FROUT_SPILL *
2461 sizeof(struct fr_desc));
2462
2463 priv->class_2_spill =
2464 rmi_xlr_config_spill(priv->mmio,
2465 R_CLASS2_SPILL_MEM_START_0,
2466 R_CLASS2_SPILL_MEM_START_1,
2467 R_CLASS2_SPILL_MEM_SIZE,
2468 MAX_CLASS_2_SPILL *
2469 sizeof(union rx_tx_desc));
2470 priv->class_3_spill =
2471 rmi_xlr_config_spill(priv->mmio,
2472 R_CLASS3_SPILL_MEM_START_0,
2473 R_CLASS3_SPILL_MEM_START_1,
2474 R_CLASS3_SPILL_MEM_SIZE,
2475 MAX_CLASS_3_SPILL *
2476 sizeof(union rx_tx_desc));
2477 priv->spill_configured = 1;
2478}
2479
2480/*****************************************************************
2481 * Write the MAC address to the XLR registers
2482 * All 4 addresses are the same for now
2483 *****************************************************************/
/*
 * Program the station MAC address and RX filter.  The 6-byte
 * address goes into MAC_ADDR0: low word holds bytes 5..2, high
 * word bytes 1..0.  Address masks 2 and 3 are set to all-ones,
 * and the filter accepts broadcast, all multicast, and unicast
 * frames matching address 0.
 */
static void
xlr_mac_setup_hwaddr(struct driver_data *priv)
{
	struct rge_softc *sc = priv->sc;

	xlr_write_reg(priv->mmio, R_MAC_ADDR0,
	    ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16)
	    | (sc->dev_addr[3] << 8) | (sc->dev_addr[2]))
	    );

	xlr_write_reg(priv->mmio, R_MAC_ADDR0 + 1,
	    ((sc->dev_addr[1] << 24) | (sc->
	    dev_addr[0] << 16)));

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2 + 1, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3 + 1, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG,
	    (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
	    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
	    (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)
	    );
}
2512
2513/*****************************************************************
2514 * Read the MAC address from the XLR registers
2515 * All 4 addresses are the same for now
2516 *****************************************************************/
2517static void
2518xlr_mac_get_hwaddr(struct rge_softc *sc)
2519{
2520 struct driver_data *priv = &(sc->priv);
2521
2522 sc->dev_addr[0] = (xlr_boot1_info.mac_addr >> 40) & 0xff;
2523 sc->dev_addr[1] = (xlr_boot1_info.mac_addr >> 32) & 0xff;
2524 sc->dev_addr[2] = (xlr_boot1_info.mac_addr >> 24) & 0xff;
2525 sc->dev_addr[3] = (xlr_boot1_info.mac_addr >> 16) & 0xff;
2526 sc->dev_addr[4] = (xlr_boot1_info.mac_addr >> 8) & 0xff;
2527 sc->dev_addr[5] = ((xlr_boot1_info.mac_addr >> 0) & 0xff) + priv->instance;
2528}
2529
2530/*****************************************************************
2531 * Mac Module Initialization
2532 *****************************************************************/
/*
 * One-time driver-wide initialization: set up the p2d descriptor
 * pool and TX ring, then register the message-ring handler for
 * the GMAC station IDs.  XLS parts have a second GMAC block
 * (GMAC1) needing its own registration.
 */
static void
mac_common_init(void)
{
	init_p2d_allocation();
	init_tx_ring();

	if (xlr_board_info.is_xls) {
		if (register_msgring_handler(MSGRNG_STNID_GMAC,
		    MSGRNG_STNID_GMAC + 1, rmi_xlr_mac_msgring_handler,
		    NULL)) {
			panic("Couldn't register msgring handler\n");
		}
		if (register_msgring_handler(MSGRNG_STNID_GMAC1,
		    MSGRNG_STNID_GMAC1 + 1, rmi_xlr_mac_msgring_handler,
		    NULL)) {
			panic("Couldn't register msgring handler\n");
		}
	} else {
		if (register_msgring_handler(MSGRNG_STNID_GMAC,
		    MSGRNG_STNID_GMAC + 1, rmi_xlr_mac_msgring_handler,
		    NULL)) {
			panic("Couldn't register msgring handler\n");
		}
	}

	/*
	 * Not yet if (xlr_board_atx_ii()) { if (register_msgring_handler
	 * (TX_STN_XGS_0, rmi_xlr_mac_msgring_handler, NULL)) {
	 * panic("Couldn't register msgring handler for TX_STN_XGS_0\n"); }
	 * if (register_msgring_handler (TX_STN_XGS_1,
	 * rmi_xlr_mac_msgring_handler, NULL)) { panic("Couldn't register
	 * msgring handler for TX_STN_XGS_1\n"); } }
	 */
}