/* rge.c revision 202066 */
1/*- 2 * Copyright (c) 2003-2009 RMI Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. Neither the name of RMI Corporation, nor the names of its contributors, 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
28 * 29 * RMI_BSD 30 */ 31 32#include <sys/cdefs.h> 33__FBSDID("$FreeBSD: head/sys/mips/rmi/dev/xlr/rge.c 202066 2010-01-11 04:49:44Z imp $"); 34 35#ifdef HAVE_KERNEL_OPTION_HEADERS 36#include "opt_device_polling.h" 37#endif 38 39#include <sys/types.h> 40#include <sys/endian.h> 41#include <sys/systm.h> 42#include <sys/sockio.h> 43#include <sys/param.h> 44#include <sys/lock.h> 45#include <sys/mutex.h> 46#include <sys/proc.h> 47#include <sys/limits.h> 48#include <sys/bus.h> 49#include <sys/mbuf.h> 50#include <sys/malloc.h> 51#include <sys/kernel.h> 52#include <sys/module.h> 53#include <sys/socket.h> 54#define __RMAN_RESOURCE_VISIBLE 55#include <sys/rman.h> 56#include <sys/taskqueue.h> 57 58#include <net/if.h> 59#include <net/if_arp.h> 60#include <net/ethernet.h> 61#include <net/if_dl.h> 62#include <net/if_media.h> 63 64#include <net/bpf.h> 65 66#include <net/if_types.h> 67#include <net/if_vlan_var.h> 68 69#include <netinet/in_systm.h> 70#include <netinet/in.h> 71#include <netinet/ip.h> 72 73#include <vm/vm.h> 74#include <vm/pmap.h> 75 76#include <machine/reg.h> 77#include <machine/cpu.h> 78#include <machine/mips_opcode.h> 79#include <machine/asm.h> 80#include <mips/rmi/rmi_mips_exts.h> 81#include <machine/cpuregs.h> 82 83#include <machine/param.h> 84#include <machine/intr_machdep.h> 85#include <machine/clock.h> /* for DELAY */ 86#include <machine/bus.h> /* */ 87#include <machine/resource.h> 88#include <mips/rmi/interrupt.h> 89#include <mips/rmi/msgring.h> 90#include <mips/rmi/iomap.h> 91#include <mips/rmi/debug.h> 92#include <mips/rmi/pic.h> 93#include <mips/rmi/xlrconfig.h> 94#include <mips/rmi/shared_structs.h> 95#include <mips/rmi/board.h> 96 97#include <mips/rmi/dev/xlr/atx_cpld.h> 98#include <mips/rmi/dev/xlr/xgmac_mdio.h> 99 100#include <dev/mii/mii.h> 101#include <dev/mii/miivar.h> 102#include <dev/mii/brgphyreg.h> 103 104#include <sys/sysctl.h> 105#include <dev/rmi/xlr/rge.h> 106 107/* #include "opt_rge.h" */ 108 109#include "miibus_if.h" 110 
MODULE_DEPEND(rge, ether, 1, 1, 1);
MODULE_DEPEND(rge, miibus, 1, 1, 1);

/* #define DEBUG */
/*#define RX_COPY */

#define RGE_TX_THRESHOLD 1024
#define RGE_TX_Q_SIZE 1024

#ifdef DEBUG
#undef dbg_msg
int mac_debug = 1;

/* Verbose debug printf tagged with file/line/function and current cpu. */
#define dbg_msg(fmt, args...) \
        do {\
            if (mac_debug) {\
                printf("[%s@%d|%s]: cpu_%d: " fmt, \
                __FILE__, __LINE__, __FUNCTION__, PCPU_GET(cpuid), ##args);\
            }\
        } while(0);

#define DUMP_PACKETS
#else
#undef dbg_msg
#define dbg_msg(fmt, args...)
int mac_debug = 0;

#endif

#define MAC_B2B_IPG             88

/* frame sizes need to be cacheline aligned */
#define MAX_FRAME_SIZE          1536
#define MAX_FRAME_SIZE_JUMBO    9216

#define MAC_SKB_BACK_PTR_SIZE   SMP_CACHE_BYTES
#define MAC_PREPAD              0
#define BYTE_OFFSET             2
#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE+BYTE_OFFSET+MAC_PREPAD+MAC_SKB_BACK_PTR_SIZE+SMP_CACHE_BYTES)
#define MAC_CRC_LEN             4
#define MAX_NUM_MSGRNG_STN_CC   128

#define MAX_NUM_DESC            1024
#define MAX_SPILL_SIZE          (MAX_NUM_DESC + 128)

#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16

/* Spill areas let the message ring overflow into memory; sized in bytes. */
#define MAX_FRIN_SPILL          (MAX_SPILL_SIZE << 2)
#define MAX_FROUT_SPILL         (MAX_SPILL_SIZE << 2)
#define MAX_CLASS_0_SPILL       (MAX_SPILL_SIZE << 2)
#define MAX_CLASS_1_SPILL       (MAX_SPILL_SIZE << 2)
#define MAX_CLASS_2_SPILL       (MAX_SPILL_SIZE << 2)
#define MAX_CLASS_3_SPILL       (MAX_SPILL_SIZE << 2)

/*****************************************************************
 * Phoenix Generic Mac driver
 *****************************************************************/

/* Logical-to-physical cpu map, provided by platform startup code. */
extern uint32_t cpu_ltop_map[32];

#ifdef ENABLED_DEBUG
static int port_counters[4][8] __aligned(XLR_CACHELINE_SIZE);

#define port_inc_counter(port, counter) atomic_add_int(&port_counters[port][(counter)], 1)
#define port_set_counter(port, counter, value) atomic_set_int(&port_counters[port][(counter)], (value))
#else
#define port_inc_counter(port, counter)         /* Nothing */
#define port_set_counter(port, counter, value)  /* Nothing */
#endif

/* Per-vcpu event counters, indexed by (cpu_id << 2) + thread_id. */
int xlr_rge_tx_prepend[MAXCPU];
int xlr_rge_tx_done[MAXCPU];
int xlr_rge_get_p2d_failed[MAXCPU];
int xlr_rge_msg_snd_failed[MAXCPU];
int xlr_rge_tx_ok_done[MAXCPU];
int xlr_rge_rx_done[MAXCPU];
int xlr_rge_repl_done[MAXCPU];

/*
 * Atomic fetch-and-add (word, unsigned) using the XLR "ldaddwu"
 * custom instruction, encoded directly since the assembler does not
 * know it.  Returns the previous value of *addr and adds 'value' to it.
 */
static __inline__ unsigned int
ldadd_wu(unsigned int value, unsigned long *addr)
{
        __asm__  __volatile__(".set push\n"
            ".set noreorder\n"
            "move $8, %2\n"
            "move $9, %3\n"
        /* "ldaddwu $8, $9\n" */
            ".word 0x71280011\n"
            "move %0, $8\n"
            ".set pop\n"
            : "=&r"(value), "+m"(*addr)
            : "0"(value), "r"((unsigned long)addr)
            : "$8", "$9");

        return value;
}

/* #define mac_stats_add(x, val) ({(x) += (val);}) */
/* Lock-free statistics increment via the atomic add above. */
#define mac_stats_add(x, val) ldadd_wu(val, &x)


#define XLR_MAX_CORE 8
#define RGE_LOCK_INIT(_sc, _name) \
  mtx_init(&(_sc)->rge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
#define RGE_LOCK(_sc)   mtx_lock(&(_sc)->rge_mtx)
#define RGE_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->rge_mtx, MA_OWNED)
#define RGE_UNLOCK(_sc)   mtx_unlock(&(_sc)->rge_mtx)
#define RGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rge_mtx)

#define XLR_MAX_MACS 8
#define XLR_MAX_TX_FRAGS 14
#define MAX_P2D_DESC_PER_PORT 512
/* P2D descriptor: up to XLR_MAX_TX_FRAGS fragments plus two software
 * bookkeeping slots (descriptor VA and mbuf VA; see build_frag_list). */
struct p2d_tx_desc {
        uint64_t frag[XLR_MAX_TX_FRAGS + 2];
};

#define MAX_TX_RING_SIZE (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT * sizeof(struct p2d_tx_desc))

struct rge_softc *dev_mac[XLR_MAX_MACS];
static int dev_mac_xgs0;
static int dev_mac_gmac0;

static int gmac_common_init_done;


static int rge_probe(device_t);
static int rge_attach(device_t);
static int rge_detach(device_t);
static int rge_suspend(device_t);
static int rge_resume(device_t);
static void rge_release_resources(struct rge_softc *);
static void rge_rx(struct rge_softc *, vm_paddr_t paddr, int);
static void rge_intr(void
*); 243static void rge_start_locked(struct ifnet *, int); 244static void rge_start(struct ifnet *); 245static int rge_ioctl(struct ifnet *, u_long, caddr_t); 246static void rge_init(void *); 247static void rge_stop(struct rge_softc *); 248static void rge_watchdog(struct ifnet *); 249static int rge_shutdown(device_t); 250static void rge_reset(struct rge_softc *); 251 252static struct mbuf *get_mbuf(void); 253static void free_buf(vm_paddr_t paddr); 254static void *get_buf(void); 255 256static void xlr_mac_get_hwaddr(struct rge_softc *); 257static void xlr_mac_setup_hwaddr(struct driver_data *); 258static void rmi_xlr_mac_set_enable(struct driver_data *priv, int flag); 259static void rmi_xlr_xgmac_init(struct driver_data *priv); 260static void rmi_xlr_gmac_init(struct driver_data *priv); 261static void mac_common_init(void); 262static int rge_mii_write(device_t, int, int, int); 263static int rge_mii_read(device_t, int, int); 264static void rmi_xlr_mac_mii_statchg(device_t); 265static int rmi_xlr_mac_mediachange(struct ifnet *); 266static void rmi_xlr_mac_mediastatus(struct ifnet *, struct ifmediareq *); 267static void xlr_mac_set_rx_mode(struct rge_softc *sc); 268void 269rmi_xlr_mac_msgring_handler(int bucket, int size, int code, 270 int stid, struct msgrng_msg *msg, 271 void *data); 272static void mac_frin_replenish(void *); 273static int rmi_xlr_mac_open(struct rge_softc *); 274static int rmi_xlr_mac_close(struct rge_softc *); 275static int 276mac_xmit(struct mbuf *, struct rge_softc *, 277 struct driver_data *, int, struct p2d_tx_desc *); 278static int rmi_xlr_mac_xmit(struct mbuf *, struct rge_softc *, int, struct p2d_tx_desc *); 279static struct rge_softc_stats *rmi_xlr_mac_get_stats(struct rge_softc *sc); 280static void rmi_xlr_mac_set_multicast_list(struct rge_softc *sc); 281static int rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu); 282static int rmi_xlr_mac_fill_rxfr(struct rge_softc *sc); 283static void rmi_xlr_config_spill_area(struct driver_data 
*priv); 284static int rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed); 285static int 286rmi_xlr_mac_set_duplex(struct driver_data *s, 287 xlr_mac_duplex_t duplex, xlr_mac_fc_t fc); 288static void serdes_regs_init(struct driver_data *priv); 289static int rmi_xlr_gmac_reset(struct driver_data *priv); 290 291/*Statistics...*/ 292static int get_p2d_desc_failed = 0; 293static int msg_snd_failed = 0; 294 295SYSCTL_INT(_hw, OID_AUTO, get_p2d_failed, CTLFLAG_RW, 296 &get_p2d_desc_failed, 0, "p2d desc failed"); 297SYSCTL_INT(_hw, OID_AUTO, msg_snd_failed, CTLFLAG_RW, 298 &msg_snd_failed, 0, "msg snd failed"); 299 300struct callout xlr_tx_stop_bkp; 301 302static device_method_t rge_methods[] = { 303 /* Device interface */ 304 DEVMETHOD(device_probe, rge_probe), 305 DEVMETHOD(device_attach, rge_attach), 306 DEVMETHOD(device_detach, rge_detach), 307 DEVMETHOD(device_shutdown, rge_shutdown), 308 DEVMETHOD(device_suspend, rge_suspend), 309 DEVMETHOD(device_resume, rge_resume), 310 311 /* MII interface */ 312 DEVMETHOD(miibus_readreg, rge_mii_read), 313 DEVMETHOD(miibus_statchg, rmi_xlr_mac_mii_statchg), 314 DEVMETHOD(miibus_writereg, rge_mii_write), 315 {0, 0} 316}; 317 318static driver_t rge_driver = { 319 "rge", 320 rge_methods, 321 sizeof(struct rge_softc) 322}; 323 324static devclass_t rge_devclass; 325 326DRIVER_MODULE(rge, iodi, rge_driver, rge_devclass, 0, 0); 327DRIVER_MODULE(miibus, rge, miibus_driver, miibus_devclass, 0, 0); 328 329#ifndef __STR 330#define __STR(x) #x 331#endif 332#ifndef STR 333#define STR(x) __STR(x) 334#endif 335 336#define XKPHYS 0x8000000000000000 337/* -- No longer needed RRS 338static __inline__ uint32_t 339lw_40bit_phys(uint64_t phys, int cca) 340{ 341 uint64_t addr; 342 uint32_t value = 0; 343 unsigned long flags; 344 345 addr = XKPHYS | ((uint64_t) cca << 59) | (phys & 0xfffffffffcULL); 346 347 enable_KX(flags); 348 __asm__ __volatile__( 349 ".set push\n" 350 ".set noreorder\n" 351 ".set mips64\n" 352 "lw %0, 0(%1) \n" 353 
".set pop\n" 354 : "=r"(value) 355 : "r"(addr)); 356 357 disable_KX(flags); 358 return value; 359} 360*/ 361/* -- No longer used RRS 362static __inline__ uint64_t 363ld_40bit_phys(uint64_t phys, int cca) 364{ 365 uint64_t addr; 366 uint64_t value = 0; 367 unsigned long flags; 368 369 370 addr = XKPHYS | ((uint64_t) cca << 59) | (phys & 0xfffffffffcULL); 371 enable_KX(flags); 372 __asm__ __volatile__( 373 ".set push\n" 374 ".set noreorder\n" 375 ".set mips64\n" 376 "ld %0, 0(%1) \n" 377 ".set pop\n" 378 : "=r"(value) 379 : "r"(addr)); 380 381 disable_KX(flags); 382 return value; 383} 384*/ 385 386void *xlr_tx_ring_mem; 387 388struct tx_desc_node { 389 struct p2d_tx_desc *ptr; 390 TAILQ_ENTRY(tx_desc_node) list; 391}; 392 393#define XLR_MAX_TX_DESC_NODES (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT) 394struct tx_desc_node tx_desc_nodes[XLR_MAX_TX_DESC_NODES]; 395static volatile int xlr_tot_avail_p2d[XLR_MAX_CORE]; 396static int xlr_total_active_core = 0; 397 398/* 399 * This should contain the list of all free tx frag desc nodes pointing to tx 400 * p2d arrays 401 */ 402static 403TAILQ_HEAD(, tx_desc_node) tx_frag_desc[XLR_MAX_CORE] = 404{ 405 TAILQ_HEAD_INITIALIZER(tx_frag_desc[0]), 406 TAILQ_HEAD_INITIALIZER(tx_frag_desc[1]), 407 TAILQ_HEAD_INITIALIZER(tx_frag_desc[2]), 408 TAILQ_HEAD_INITIALIZER(tx_frag_desc[3]), 409 TAILQ_HEAD_INITIALIZER(tx_frag_desc[4]), 410 TAILQ_HEAD_INITIALIZER(tx_frag_desc[5]), 411 TAILQ_HEAD_INITIALIZER(tx_frag_desc[6]), 412 TAILQ_HEAD_INITIALIZER(tx_frag_desc[7]), 413}; 414 415/* This contains a list of free tx frag node descriptors */ 416static 417TAILQ_HEAD(, tx_desc_node) free_tx_frag_desc[XLR_MAX_CORE] = 418{ 419 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[0]), 420 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[1]), 421 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[2]), 422 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[3]), 423 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[4]), 424 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[5]), 425 
TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[6]),
        TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[7]),
};

/* Per-core spin locks protecting tx_frag_desc/free_tx_frag_desc lists. */
static struct mtx tx_desc_lock[XLR_MAX_CORE];

/*
 * Build a free-in (RFR) message carrying the 40-bit, cacheline-aligned
 * physical address of a receive buffer to hand back to the MAC.
 */
static inline void
mac_make_desc_rfr(struct msgrng_msg *msg,
    vm_paddr_t addr)
{
        msg->msg0 = (uint64_t) addr & 0xffffffffe0ULL;
        msg->msg1 = msg->msg2 = msg->msg3 = 0;
}

#define MAC_TX_DESC_ALIGNMENT (XLR_CACHELINE_SIZE - 1)

/*
 * Count the active cores (4 hw threads per core) from the run-time cpu
 * mask and split the TX descriptor pool evenly among them.
 */
static void
init_p2d_allocation(void)
{
        int active_core[8] = {0};
        int i = 0;
        uint32_t cpumask;
        int cpu;

        cpumask = PCPU_GET(cpumask) | PCPU_GET(other_cpus);

        for (i = 0; i < 32; i++) {
                if (cpumask & (1 << i)) {
                        cpu = cpu_ltop_map[i];
                        if (!active_core[cpu / 4]) {
                                active_core[cpu / 4] = 1;
                                xlr_total_active_core++;
                        }
                }
        }
        for (i = 0; i < XLR_MAX_CORE; i++) {
                if (active_core[i])
                        xlr_tot_avail_p2d[i] = XLR_MAX_TX_DESC_NODES / xlr_total_active_core;
        }
        printf("Total Active Core %d\n", xlr_total_active_core);
}


/*
 * Allocate the physically-contiguous TX descriptor ring and distribute
 * the descriptor nodes over the per-core free lists.  Must run after
 * init_p2d_allocation() so xlr_total_active_core is set.
 */
static void
init_tx_ring(void)
{
        int i;
        int j = 0;
        struct tx_desc_node *start, *node;
        struct p2d_tx_desc *tx_desc;
        vm_paddr_t paddr;
        vm_offset_t unmapped_addr;

        for (i = 0; i < XLR_MAX_CORE; i++)
                mtx_init(&tx_desc_lock[i], "xlr tx_desc", NULL, MTX_SPIN);

        start = &tx_desc_nodes[0];
        /* TODO: try to get this from KSEG0 */
        xlr_tx_ring_mem = contigmalloc((MAX_TX_RING_SIZE + XLR_CACHELINE_SIZE),
            M_DEVBUF, M_NOWAIT | M_ZERO, 0,
            0x10000000, XLR_CACHELINE_SIZE, 0);

        if (xlr_tx_ring_mem == NULL) {
                panic("TX ring memory allocation failed");
        }
        paddr = vtophys((vm_offset_t)xlr_tx_ring_mem);

        /* Access the ring through KSEG0 (unmapped, cached) to avoid TLB. */
        unmapped_addr = MIPS_PHYS_TO_KSEG0(paddr);


        tx_desc = (struct p2d_tx_desc *)unmapped_addr;

        for (i = 0; i < XLR_MAX_TX_DESC_NODES; i++) {
                node = start + i;
                node->ptr = tx_desc;
                tx_desc++;
                TAILQ_INSERT_HEAD(&tx_frag_desc[j], node, list);
                /* advance to the next core's list every pool-share nodes */
                j = (i / (XLR_MAX_TX_DESC_NODES / xlr_total_active_core));
        }
}

/*
 * Pop a TX p2d descriptor from the current core's free list, parking the
 * list node on free_tx_frag_desc for reuse by free_p2d_desc().  Returns
 * NULL (and counts the failure) when the pool is exhausted.
 */
static inline struct p2d_tx_desc *
get_p2d_desc(void)
{
        struct tx_desc_node *node;
        struct p2d_tx_desc *tx_desc = NULL;
        int cpu = xlr_cpu_id();

        mtx_lock_spin(&tx_desc_lock[cpu]);
        node = TAILQ_FIRST(&tx_frag_desc[cpu]);
        if (node) {
                xlr_tot_avail_p2d[cpu]--;
                TAILQ_REMOVE(&tx_frag_desc[cpu], node, list);
                tx_desc = node->ptr;
                TAILQ_INSERT_HEAD(&free_tx_frag_desc[cpu], node, list);
        } else {
                /* Increment p2d desc fail count */
                get_p2d_desc_failed++;
        }
        mtx_unlock_spin(&tx_desc_lock[cpu]);
        return tx_desc;
}
/*
 * Return a TX p2d descriptor to the current core's pool.
 * NOTE(review): descriptors may be freed on a different core than the
 * one that allocated them, so per-core pool counts can drift — confirm.
 */
static void
free_p2d_desc(struct p2d_tx_desc *tx_desc)
{
        struct tx_desc_node *node;
        int cpu = xlr_cpu_id();

        mtx_lock_spin(&tx_desc_lock[cpu]);
        node = TAILQ_FIRST(&free_tx_frag_desc[cpu]);
        KASSERT((node != NULL), ("Free TX frag node list is empty\n"));

        TAILQ_REMOVE(&free_tx_frag_desc[cpu], node, list);
        node->ptr = tx_desc;
        TAILQ_INSERT_HEAD(&tx_frag_desc[cpu], node, list);
        xlr_tot_avail_p2d[cpu]++;
        mtx_unlock_spin(&tx_desc_lock[cpu]);

}

/*
 * Translate an mbuf chain into p2d fragment entries and fill in the
 * p2p message pointing at the descriptor.  Returns 0 on success, 1 on
 * failure (NULL descriptor or too many fragments; the descriptor is
 * freed in the latter case).  Fragments that straddle a page whose
 * next page is not physically contiguous are split in two.
 */
static int
build_frag_list(struct mbuf *m_head, struct msgrng_msg *p2p_msg, struct p2d_tx_desc *tx_desc)
{
        struct mbuf *m;
        vm_paddr_t paddr;
        uint64_t p2d_len;
        int nfrag;
        vm_paddr_t p1, p2;
        uint32_t len1, len2;
        vm_offset_t taddr;
        uint64_t fr_stid;

        /* free-back bucket for this vcpu; +4 selects the FR station */
        fr_stid = (xlr_cpu_id() << 3) + xlr_thr_id() + 4;

        if (tx_desc == NULL)
                return 1;

        nfrag = 0;
        for (m = m_head; m != NULL; m = m->m_next) {
                if ((nfrag + 1) >= XLR_MAX_TX_FRAGS) {
                        free_p2d_desc(tx_desc);
                        return 1;
                }
                if (m->m_len != 0) {
                        paddr = vtophys(mtod(m, vm_offset_t));
                        p1 = paddr + m->m_len;
                        p2 = vtophys(((vm_offset_t)m->m_data + m->m_len));
                        if (p1 != p2) {
                                /* buffer crosses a physical page break:
                                 * emit the first page's piece here and the
                                 * remainder as a second fragment below */
                                len1 = (uint32_t)
                                    (PAGE_SIZE - (paddr & PAGE_MASK));
                                tx_desc->frag[nfrag] = (127ULL << 54) |
                                    ((uint64_t)
len1 << 40) | paddr; 577 nfrag++; 578 taddr = (vm_offset_t)m->m_data + len1; 579 p2 = vtophys(taddr); 580 len2 = m->m_len - len1; 581 if (nfrag >= XLR_MAX_TX_FRAGS) 582 panic("TX frags exceeded"); 583 584 tx_desc->frag[nfrag] = (127ULL << 54) | 585 ((uint64_t) len2 << 40) | p2; 586 587 taddr += len2; 588 p1 = vtophys(taddr); 589 590 if ((p2 + len2) != p1) { 591 printf("p1 = %p p2 = %p\n", (void *)p1, (void *)p2); 592 printf("len1 = %x len2 = %x\n", len1, 593 len2); 594 printf("m_data %p\n", m->m_data); 595 DELAY(1000000); 596 panic("Multiple Mbuf segment discontiguous\n"); 597 } 598 } else { 599 tx_desc->frag[nfrag] = (127ULL << 54) | 600 ((uint64_t) m->m_len << 40) | paddr; 601 } 602 nfrag++; 603 } 604 } 605 /* set eop in the last tx p2d desc */ 606 tx_desc->frag[nfrag - 1] |= (1ULL << 63); 607 paddr = vtophys((vm_offset_t)tx_desc); 608 tx_desc->frag[nfrag] = (1ULL << 63) | (fr_stid << 54) | paddr; 609 nfrag++; 610 tx_desc->frag[XLR_MAX_TX_FRAGS] = (uint64_t) (vm_offset_t)tx_desc; 611 tx_desc->frag[XLR_MAX_TX_FRAGS + 1] = (uint64_t) (vm_offset_t)m_head; 612 613 p2d_len = (nfrag * 8); 614 p2p_msg->msg0 = (1ULL << 63) | (1ULL << 62) | (127ULL << 54) | 615 (p2d_len << 40) | paddr; 616 617 return 0; 618} 619static void 620release_tx_desc(struct msgrng_msg *msg, int rel_buf) 621{ 622 /* 623 * OLD code: vm_paddr_t paddr = msg->msg0 & 0xffffffffffULL; 624 * uint64_t temp; struct p2d_tx_desc *tx_desc; struct mbuf *m; 625 * 626 * paddr += (XLR_MAX_TX_FRAGS * sizeof(uint64_t)); *** In o32 we will 627 * crash here ****** temp = ld_40bit_phys(paddr, 3); tx_desc = 628 * (struct p2d_tx_desc *)((vm_offset_t)temp); 629 * 630 * if (rel_buf) { paddr += sizeof(uint64_t); 631 * 632 * temp = ld_40bit_phys(paddr, 3); 633 * 634 * m = (struct mbuf *)((vm_offset_t)temp); m_freem(m); } printf("Call 635 * fre_p2d_desc\n"); free_p2d_desc(tx_desc); 636 */ 637 struct p2d_tx_desc *tx_desc, *chk_addr; 638 struct mbuf *m; 639 640 tx_desc = (struct p2d_tx_desc *)MIPS_PHYS_TO_KSEG0(msg->msg0); 641 
chk_addr = (struct p2d_tx_desc *)(uint32_t) (tx_desc->frag[XLR_MAX_TX_FRAGS] & 0x00000000ffffffff); 642 if (tx_desc != chk_addr) { 643 printf("Address %p does not match with stored addr %p - we leaked a descriptor\n", 644 tx_desc, chk_addr); 645 return; 646 } 647 if (rel_buf) { 648 m = (struct mbuf *)(uint32_t) (tx_desc->frag[XLR_MAX_TX_FRAGS + 1] & 0x00000000ffffffff); 649 m_freem(m); 650 } 651 free_p2d_desc(tx_desc); 652} 653 654#ifdef RX_COPY 655#define RGE_MAX_NUM_DESC (6 * MAX_NUM_DESC) 656uint8_t *rge_rx_buffers[RGE_MAX_NUM_DESC]; 657static struct mtx rge_rx_mtx; 658int g_rx_buf_head; 659 660static void 661init_rx_buf(void) 662{ 663 int i; 664 uint8_t *buf, *start; 665 uint32_t size, *ptr; 666 667 mtx_init(&rge_rx_mtx, "xlr rx_desc", NULL, MTX_SPIN); 668 669 size = (RGE_MAX_NUM_DESC * (MAX_FRAME_SIZE + XLR_CACHELINE_SIZE)); 670 671 start = (uint8_t *) contigmalloc(size, M_DEVBUF, M_NOWAIT | M_ZERO, 672 0, 0xffffffff, XLR_CACHELINE_SIZE, 0); 673 if (start == NULL) 674 panic("NO RX BUFFERS"); 675 buf = start; 676 size = (MAX_FRAME_SIZE + XLR_CACHELINE_SIZE); 677 for (i = 0; i < RGE_MAX_NUM_DESC; i++) { 678 buf = start + (i * size); 679 ptr = (uint32_t *) buf; 680 *ptr = (uint32_t) buf; 681 rge_rx_buffers[i] = buf + XLR_CACHELINE_SIZE; 682 } 683} 684 685static void * 686get_rx_buf(void) 687{ 688 void *ptr = NULL; 689 690 mtx_lock_spin(&rge_rx_mtx); 691 if (g_rx_buf_head < RGE_MAX_NUM_DESC) { 692 ptr = (void *)rge_rx_buffers[g_rx_buf_head]; 693 g_rx_buf_head++; 694 } 695 mtx_unlock_spin(&rge_rx_mtx); 696 return ptr; 697} 698 699#endif 700 701static struct mbuf * 702get_mbuf(void) 703{ 704 struct mbuf *m_new = NULL; 705 706 if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL) 707 return NULL; 708 709 m_new->m_len = MCLBYTES; 710 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 711 return m_new; 712} 713 714static void 715free_buf(vm_paddr_t paddr) 716{ 717 struct mbuf *m; 718 uint32_t *temp; 719 uint32_t mag, um; 720 721 /* 722 * This will crash I think. 
         * RRS temp = lw_40bit_phys((paddr -
         * XLR_CACHELINE_SIZE), 3); m = (struct mbuf *)temp;
         */
        /*
         * This gets us a kseg0 address for the mbuf/magic on the ring but
         * we need to get the va to free the mbuf. This is stored at *temp;
         */
        temp = (uint32_t *) MIPS_PHYS_TO_KSEG0(paddr - XLR_CACHELINE_SIZE);
        um = temp[0];           /* mbuf VA stashed by get_buf() */
        mag = temp[1];          /* magic written by get_buf() */
        if (mag != 0xf00bad) {
                printf("Something is wrong kseg:%p found mag:%x not 0xf00bad\n",
                    temp, mag);
                return;
        }
        m = (struct mbuf *)um;
        if (m != NULL)
                m_freem(m);
}

/*
 * Produce a receive buffer for the MAC free-in ring.  Without RX_COPY
 * this is a cluster mbuf whose data pointer is cacheline aligned, with
 * the mbuf VA and a 0xf00bad magic stored in the cacheline preceding
 * the returned data area (consumed later by free_buf()).
 */
static void *
get_buf(void)
{
#ifdef RX_COPY
        return get_rx_buf();
#else
        struct mbuf *m_new = NULL;

#ifdef INVARIANTS
        vm_paddr_t temp1, temp2;

#endif
        unsigned int *md;

        m_new = get_mbuf();

        if (m_new == NULL)
                return NULL;

        /* align m_data up to a cacheline boundary */
        m_adj(m_new, XLR_CACHELINE_SIZE - ((unsigned int)m_new->m_data & 0x1f));
        md = (unsigned int *)m_new->m_data;
        md[0] = (unsigned int)m_new;    /* Back Ptr */
        md[1] = 0xf00bad;
        /* skip past the bookkeeping cacheline; DMA starts here */
        m_adj(m_new, XLR_CACHELINE_SIZE);


        /* return (void *)m_new; */
#ifdef INVARIANTS
        temp1 = vtophys((vm_offset_t)m_new->m_data);
        temp2 = vtophys((vm_offset_t)m_new->m_data + 1536);
        if ((temp1 + 1536) != temp2)
                panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
#endif
        return (void *)m_new->m_data;
#endif
}

/**********************************************************************
 * Enable or disable the MAC's TX and RX paths (TX_CONTROL, RX_CONTROL
 * and MAC_CONFIG_1 registers), per 'flag'.
 **********************************************************************/
static void
rmi_xlr_mac_set_enable(struct driver_data *priv, int flag)
{
        uint32_t regval;
        int tx_threshold = 1518;

        if (flag) {
                regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
                regval |= (1 << O_TX_CONTROL__TxEnable) |
                    (tx_threshold << O_TX_CONTROL__TxThreshold);

                xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);

                regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
                regval |= 1 << O_RX_CONTROL__RxEnable;
                if (priv->mode == XLR_PORT0_RGMII)
                        regval |= 1 << O_RX_CONTROL__RGMII;
                xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);

                regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
                regval |= (O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
                xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
        } else {
                regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
                regval &= ~((1 << O_TX_CONTROL__TxEnable) |
                    (tx_threshold << O_TX_CONTROL__TxThreshold));

                xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);

                regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
                regval &= ~(1 << O_RX_CONTROL__RxEnable);
                xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);

                regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
                regval &= ~(O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
                xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
        }
}

/**********************************************************************
 * Post a free receive buffer (by physical address) back to the MAC's
 * free-in bucket via the message ring; spins until the send succeeds.
 * Always returns 0.  'len' is currently unused.
 **********************************************************************/
static __inline__ int
xlr_mac_send_fr(struct driver_data *priv,
    vm_paddr_t addr, int len)
{
        int stid = priv->rfrbucket;
        struct msgrng_msg msg;
        int vcpu = (xlr_cpu_id() << 2) + xlr_thr_id();

        mac_make_desc_rfr(&msg, addr);

        /* Send the packet to MAC */
        dbg_msg("mac_%d: Sending free packet %llx to stid %d\n",
            priv->instance, addr, stid);
        if (priv->type == XLR_XGMAC) {
                while (message_send(1, MSGRNG_CODE_XGMAC, stid, &msg));
        } else {
                while (message_send(1, MSGRNG_CODE_MAC, stid, &msg));
                xlr_rge_repl_done[vcpu]++;
        }

        return 0;
}

/**************************************************************/

/*
 * Clear the isolate bit on the four XGMAC MDIO sub-devices so the
 * PHYs respond on the bus.
 */
static void
xgmac_mdio_setup(volatile unsigned int *_mmio)
{
        int i;
        uint32_t rd_data;

        for (i = 0; i < 4; i++) {
                rd_data = xmdio_read(_mmio, 1, 0x8000 + i);
                rd_data = rd_data & 0xffffdfff; /* clear isolate bit */
                xmdio_write(_mmio, 1, 0x8000 + i,
                    rd_data);
        }
}

/**********************************************************************
 * Init MII interface
 *
 * Input parameters:
 *         s - priv structure
 ********************************************************************* */
#define PHY_STATUS_RETRIES 25000

static void
rmi_xlr_mac_mii_init(struct driver_data *priv)
{
        xlr_reg_t *mii_mmio = priv->mii_mmio;

        /* use the lowest clock divisor - divisor 28 */
        xlr_write_reg(mii_mmio, R_MII_MGMT_CONFIG, 0x07);
}

/**********************************************************************
 * Read a PHY register.
 *
 * Input parameters:
 *         s - priv structure
 *         phyaddr - PHY's address
 *         regidx = index of register to read
 *
 * Return value:
 *         value read, or 0xffffffff if the poll for completion
 *         timed out after PHY_STATUS_RETRIES iterations.
 ********************************************************************* */

static int
rge_mii_read_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx)
{
        int i = 0;

        /* setup the phy reg to be used */
        xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
            (phyaddr << 8) | (regidx << 0));
        /* Issue the read command */
        xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND,
            (1 << O_MII_MGMT_COMMAND__rstat));

        /* poll for the read cycle to complete */
        for (i = 0; i < PHY_STATUS_RETRIES; i++) {
                if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
                        break;
        }

        /* clear the read cycle */
        xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND, 0);

        if (i == PHY_STATUS_RETRIES) {
                return 0xffffffff;
        }
        /* Read the data back */
        return xlr_read_reg(mii_mmio, R_MII_MGMT_STATUS);
}

/* miibus readreg method: thin wrapper around rge_mii_read_internal(). */
static int
rge_mii_read(device_t dev, int phyaddr, int regidx)
{
        struct rge_softc *sc = device_get_softc(dev);

        return rge_mii_read_internal(sc->priv.mii_mmio, phyaddr, regidx);
}

/**********************************************************************
 * Set MII hooks to newly
 * selected media
 *
 * Input parameters:
 *         ifp - Interface Pointer
 *
 * Return value:
 *         nothing
 ********************************************************************* */
static int
rmi_xlr_mac_mediachange(struct ifnet *ifp)
{
        struct rge_softc *sc = ifp->if_softc;

        if (ifp->if_flags & IFF_UP)
                mii_mediachg(&sc->rge_mii);

        return 0;
}

/**********************************************************************
 * Get the current interface media status
 *
 * Input parameters:
 *         ifp - Interface Pointer
 *         ifmr - Interface media request ptr
 *
 * Return value:
 *         nothing
 ********************************************************************* */
static void
rmi_xlr_mac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct rge_softc *sc = ifp->if_softc;

        /* Check whether this is interface is active or not. */
        ifmr->ifm_status = IFM_AVALID;
        if (sc->link_up) {
                ifmr->ifm_status |= IFM_ACTIVE;
        } else {
                /* NOTE(review): ifm_active is set only on the link-down
                 * path here, which looks inverted — confirm intent. */
                ifmr->ifm_active = IFM_ETHER;
        }
}

/**********************************************************************
 * Write a value to a PHY register.
 *
 * Input parameters:
 *         s - priv structure
 *         phyaddr - PHY to use
 *         regidx - register within the PHY
 *         regval - data to write to register
 *
 * Return value:
 *         nothing
 ********************************************************************* */
static void
rge_mii_write_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx, int regval)
{
        int i = 0;

        xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
            (phyaddr << 8) | (regidx << 0));

        /* Write the data which starts the write cycle */
        xlr_write_reg(mii_mmio, R_MII_MGMT_WRITE_DATA, regval);

        /* poll for the write cycle to complete */
        for (i = 0; i < PHY_STATUS_RETRIES; i++) {
                if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
                        break;
        }

        return;
}

/* miibus writereg method: thin wrapper around rge_mii_write_internal(). */
static int
rge_mii_write(device_t dev, int phyaddr, int regidx, int regval)
{
        struct rge_softc *sc = device_get_softc(dev);

        rge_mii_write_internal(sc->priv.mii_mmio, phyaddr, regidx, regval);
        return (0);
}

/* miibus statchg method: intentionally empty for this hardware. */
static void
rmi_xlr_mac_mii_statchg(struct device *dev)
{
}

/*
 * One-time SERDES control register setup (device address 26) followed
 * by GPIO programming; the magic values and busy-wait delays come from
 * the vendor's Linux driver.
 */
static void
serdes_regs_init(struct driver_data *priv)
{
        xlr_reg_t *mmio_gpio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GPIO_OFFSET);
        int i;

        /* Initialize SERDES CONTROL Registers */
        rge_mii_write_internal(priv->serdes_mmio, 26, 0, 0x6DB0);
        rge_mii_write_internal(priv->serdes_mmio, 26, 1, 0xFFFF);
        rge_mii_write_internal(priv->serdes_mmio, 26, 2, 0xB6D0);
        rge_mii_write_internal(priv->serdes_mmio, 26, 3, 0x00FF);
        rge_mii_write_internal(priv->serdes_mmio, 26, 4, 0x0000);
        rge_mii_write_internal(priv->serdes_mmio, 26, 5, 0x0000);
        rge_mii_write_internal(priv->serdes_mmio, 26, 6, 0x0005);
        rge_mii_write_internal(priv->serdes_mmio, 26, 7, 0x0001);
        rge_mii_write_internal(priv->serdes_mmio, 26, 8, 0x0000);
        rge_mii_write_internal(priv->serdes_mmio, 26, 9, 0x0000);
        rge_mii_write_internal(priv->serdes_mmio, 26, 10, 0x0000);

        /*
         * For loop delay and GPIO programming crud from Linux driver,
         */
        for (i = 0; i < 10000000; i++) {
        }
        mmio_gpio[0x20] = 0x7e6802;
        mmio_gpio[0x10] = 0x7104;
        for (i = 0; i < 100000000; i++) {
        }
        return;
}

/*
 * Kick autonegotiation in the PCS layer for devices 27-30 (restart,
 * then enable), pausing between register writes.
 */
static void
serdes_autoconfig(struct driver_data *priv)
{
        int delay = 100000;

        /* Enable Auto negotiation in the PCS Layer */
        rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x1000);
        DELAY(delay);
        rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x0200);
        DELAY(delay);

        rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x1000);
        DELAY(delay);
        rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x0200);
        DELAY(delay);

        rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x1000);
        DELAY(delay);
        rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x0200);
        DELAY(delay);

        rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x1000);
        DELAY(delay);
        rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x0200);
        DELAY(delay);

}

/*****************************************************************
 * Initialize GMAC
 *****************************************************************/
/*
 * Program the packet distribution engine: build a 64-bit bucket map
 * with one bucket (bucket 0 of each core) per active core, and install
 * it for all four PDE classes.
 */
static void
rmi_xlr_config_pde(struct driver_data *priv)
{
        int i = 0, cpu = 0, bucket = 0;
        uint64_t bucket_map = 0;

        /* uint32_t desc_pack_ctrl = 0; */
        uint32_t cpumask;

        cpumask = PCPU_GET(cpumask) | PCPU_GET(other_cpus);

        for (i = 0; i < 32; i++) {
                if (cpumask & (1 << i)) {
                        cpu = cpu_ltop_map[i];
                        bucket = ((cpu >> 2) << 3);
                        //|(cpu & 0x03);
                        bucket_map |= (1ULL << bucket);
                        dbg_msg("i=%d, cpu=%d, bucket = %d, bucket_map=%llx\n",
                            i, cpu, bucket, bucket_map);
                }
        }

        /* bucket_map = 0x1; */
        xlr_write_reg(priv->mmio, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
	xlr_write_reg(priv->mmio, R_PDE_CLASS_0 + 1,
	    ((bucket_map >> 32) & 0xffffffff));

	xlr_write_reg(priv->mmio, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
	xlr_write_reg(priv->mmio, R_PDE_CLASS_1 + 1,
	    ((bucket_map >> 32) & 0xffffffff));

	xlr_write_reg(priv->mmio, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
	xlr_write_reg(priv->mmio, R_PDE_CLASS_2 + 1,
	    ((bucket_map >> 32) & 0xffffffff));

	xlr_write_reg(priv->mmio, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
	xlr_write_reg(priv->mmio, R_PDE_CLASS_3 + 1,
	    ((bucket_map >> 32) & 0xffffffff));
}

/*
 * Configure the packet parser: disable classification and extract
 * the IPv4 source/destination/protocol fields for distribution.
 */
static void
rmi_xlr_config_parser(struct driver_data *priv)
{
	/*
	 * Mark it as no classification The parser extract is gauranteed to
	 * be zero with no classfication
	 */
	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x00);

	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x01);

	/* configure the parser : L2 Type is configured in the bootloader */
	/* extract IP: src, dest protocol */
	xlr_write_reg(priv->mmio, R_L3CTABLE,
	    (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
	    (0x0800 << 0));
	xlr_write_reg(priv->mmio, R_L3CTABLE + 1,
	    (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));

}

/* Zero the XGMAC translate table; GMAC needs no classifier setup. */
static void
rmi_xlr_config_classifier(struct driver_data *priv)
{
	int i = 0;

	if (priv->type == XLR_XGMAC) {
		/* xgmac translation table doesn't have sane values on reset */
		for (i = 0; i < 64; i++)
			xlr_write_reg(priv->mmio, R_TRANSLATETABLE + i, 0x0);

		/*
		 * use upper 7 bits of the parser extract to index the
		 * translate table
		 */
		xlr_write_reg(priv->mmio, R_PARSERCONFIGREG, 0x0);
	}
}

/* R_INTERFACE_CONTROL speed-select values for SGMII mode */
enum {
	SGMII_SPEED_10 = 0x00000000,
	SGMII_SPEED_100 = 0x02000000,
	SGMII_SPEED_1000 = 0x04000000,
};

/*
 * Read the resolved speed and link status from the PHY and program
 * the MAC interface control, MAC config 2 and core-control divider
 * to match; mirror the result into the ifmedia state.  An unknown
 * PHY speed falls back to 100Mbps with a console warning.
 */
static void
rmi_xlr_gmac_config_speed(struct driver_data *priv)
{
	int phy_addr = priv->phy_addr;
	xlr_reg_t *mmio = priv->mmio;
	struct rge_softc *sc = priv->sc;

	/* PHY reg 28 bits 4:3 = resolved speed; reg 1 bit 2 = link */
	priv->speed = rge_mii_read_internal(priv->mii_mmio, phy_addr, 28);
	priv->link = rge_mii_read_internal(priv->mii_mmio, phy_addr, 1) & 0x4;
	priv->speed = (priv->speed >> 3) & 0x03;

	if (priv->speed == xlr_mac_speed_10) {
		if (priv->mode != XLR_RGMII)
			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_10);
		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
		xlr_write_reg(mmio, R_CORECONTROL, 0x02);
		printf("%s: [10Mbps]\n", device_get_nameunit(sc->rge_dev));
		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
	} else if (priv->speed == xlr_mac_speed_100) {
		if (priv->mode != XLR_RGMII)
			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
		xlr_write_reg(mmio, R_CORECONTROL, 0x01);
		printf("%s: [100Mbps]\n", device_get_nameunit(sc->rge_dev));
		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
	} else {
		if (priv->speed != xlr_mac_speed_1000) {
			/* unknown speed code: default to 100Mbps */
			if (priv->mode != XLR_RGMII)
				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
			printf("PHY reported unknown MAC speed, defaulting to 100Mbps\n");
			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
			xlr_write_reg(mmio, R_CORECONTROL, 0x01);
			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		} else {
			if (priv->mode != XLR_RGMII)
				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_1000);
			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7237);
			xlr_write_reg(mmio, R_CORECONTROL, 0x00);
			printf("%s: [1000Mbps]\n", device_get_nameunit(sc->rge_dev));
			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
		}
	}

	/* no link: report bare IFM_ETHER media and mark the link down */
	if (!priv->link) {
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER;
		sc->link_up = 0;
	} else {
		sc->link_up = 1;
	}
}

/*****************************************************************
 * Initialize XGMAC
 *****************************************************************/
/*
 * One-time init of a 10G (XGMAC) port: descriptor packing, PDE,
 * parser/classifier, XGMAC glue registers, CPLD reset release,
 * MDIO setup, spill areas, and per-station bucket/credit setup.
 */
static void
rmi_xlr_xgmac_init(struct driver_data *priv)
{
	int i = 0;
	xlr_reg_t *mmio = priv->mmio;
	int id = priv->instance;
	struct rge_softc *sc = priv->sc;
	volatile unsigned short *cpld;

	/* ATX board CPLD; used below to deassert the XGMII PHY reset */
	cpld = (volatile unsigned short *)0xBD840000;

	xlr_write_reg(priv->mmio, R_DESC_PACK_CTRL,
	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize) | (4 << 20));
	xlr_write_reg(priv->mmio, R_BYTEOFFSET0, BYTE_OFFSET);
	rmi_xlr_config_pde(priv);
	rmi_xlr_config_parser(priv);
	rmi_xlr_config_classifier(priv);

	xlr_write_reg(priv->mmio, R_MSG_TX_THRESHOLD, 1);

	/* configure the XGMAC Registers */
	xlr_write_reg(mmio, R_XGMAC_CONFIG_1, 0x50000026);

	/* configure the XGMAC_GLUE Registers */
	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
	xlr_write_reg(mmio, R_STATCTRL, 0x04);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);

	xlr_write_reg(mmio, R_XGMACPADCALIBRATION, 0x030);
	xlr_write_reg(mmio, R_EGRESSFIFOCARVINGSLOTS, 0x0f);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
	xlr_write_reg(mmio, R_XGMAC_MIIM_CONFIG, 0x3e);

	/*
	 * take XGMII phy out of reset
	 */
	/*
	 * we are pulling everything out of reset because writing a 0 would
	 * reset other devices on the chip
	 */
	cpld[ATX_CPLD_RESET_1] = 0xffff;
	cpld[ATX_CPLD_MISC_CTRL] = 0xffff;
	cpld[ATX_CPLD_RESET_2] = 0xffff;

	xgmac_mdio_setup(mmio);

	rmi_xlr_config_spill_area(priv);

	/* per-instance message-ring bucket sizes and credit counters */
	if (id == 0) {
		for (i = 0; i < 16; i++) {
			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
			    bucket_sizes.
			    bucket[MSGRNG_STNID_XGS0_TX + i]);
		}

		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0JFR]);
		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0RFR]);

		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
			    cc_table_xgs_0.
			    counters[i >> 3][i & 0x07]);
		}
	} else if (id == 1) {
		for (i = 0; i < 16; i++) {
			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
			    bucket_sizes.
			    bucket[MSGRNG_STNID_XGS1_TX + i]);
		}

		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1JFR]);
		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1RFR]);

		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
			    cc_table_xgs_1.
			    counters[i >> 3][i & 0x07]);
		}
	}
	/* 10G media is always reported as 10G-SR, full duplex, active */
	sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media.ifm_media |= (IFM_AVALID | IFM_ACTIVE);
	sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media.ifm_cur->ifm_media |= (IFM_AVALID | IFM_ACTIVE);

	priv->init_frin_desc = 1;
}

/*******************************************************
 * Initialization gmac
 *******************************************************/
/*
 * Halt and soft-reset the GMAC receive path.
 *
 * Returns 0 on success, -1 if RX fails to halt or the soft reset
 * does not complete within maxloops * 1ms.
 */
static int
rmi_xlr_gmac_reset(struct driver_data *priv)
{
	volatile uint32_t val;
	xlr_reg_t *mmio = priv->mmio;
	int i, maxloops = 100;

	/* Disable MAC RX */
	val = xlr_read_reg(mmio, R_MAC_CONFIG_1);
	val &= ~0x4;
	xlr_write_reg(mmio, R_MAC_CONFIG_1, val);

	/* Disable Core RX */
	val = xlr_read_reg(mmio, R_RX_CONTROL);
	val &= ~0x1;
	xlr_write_reg(mmio, R_RX_CONTROL, val);

	/* wait for rx to halt */
	for (i = 0; i < maxloops; i++) {
		val = xlr_read_reg(mmio, R_RX_CONTROL);
		if (val & 0x2)
			break;
		DELAY(1000);
	}
	if (i == maxloops)
		return -1;

	/* Issue a soft reset */
	val = xlr_read_reg(mmio, R_RX_CONTROL);
	val |= 0x4;
	xlr_write_reg(mmio, R_RX_CONTROL, val);

	/* wait for reset to complete */
	for (i = 0; i < maxloops; i++) {
		val = xlr_read_reg(mmio, R_RX_CONTROL);
		if (val & 0x8)
			break;
		DELAY(1000);
	}
	if (i == maxloops)
		return -1;

	/* Clear the soft reset bit */
	val = xlr_read_reg(mmio, R_RX_CONTROL);
	val &= ~0x4;
	xlr_write_reg(mmio, R_RX_CONTROL, val);
	return 0;
}

/*
 * One-time init of a 1G (GMAC) port: descriptor packing, PDE,
 * parser/classifier, MII/SERDES bring-up, speed configuration and
 * message-ring bucket/credit setup.
 */
static void
rmi_xlr_gmac_init(struct driver_data *priv)
{
	int i = 0;
	xlr_reg_t *mmio = priv->mmio;
	int id = priv->instance;
	struct stn_cc *gmac_cc_config;
	uint32_t value = 0;
	/* four GMAC ports per block */
	int blk = id / 4, port = id % 4;

	rmi_xlr_mac_set_enable(priv, 0);

	rmi_xlr_config_spill_area(priv);

	xlr_write_reg(mmio, R_DESC_PACK_CTRL,
	    (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset) |
	    (1 << O_DESC_PACK_CTRL__MaxEntry) |
	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));

	rmi_xlr_config_pde(priv);
	rmi_xlr_config_parser(priv);
	rmi_xlr_config_classifier(priv);

	xlr_write_reg(mmio, R_MSG_TX_THRESHOLD, 3);
	xlr_write_reg(mmio, R_MAC_CONFIG_1, 0x35);
	xlr_write_reg(mmio, R_RX_CONTROL, (0x7 << 6));

	if (priv->mode == XLR_PORT0_RGMII) {
		printf("Port 0 set in RGMII mode\n");
		value = xlr_read_reg(mmio, R_RX_CONTROL);
		value |= 1 << O_RX_CONTROL__RGMII;
		xlr_write_reg(mmio, R_RX_CONTROL, value);
	}
	rmi_xlr_mac_mii_init(priv);


#if 0
	priv->advertising = ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half |
	    ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half |
	    ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
	    ADVERTISED_MII;
#endif

	/*
	 * Enable all MDIO interrupts in the phy RX_ER bit seems to be get
	 * set about every 1 sec in GigE mode, ignore it for now...
	 */
	rge_mii_write_internal(priv->mii_mmio, priv->phy_addr, 25, 0xfffffffe);

	if (priv->mode != XLR_RGMII) {
		serdes_regs_init(priv);
		serdes_autoconfig(priv);
	}
	rmi_xlr_gmac_config_speed(priv);

	/* set back-to-back inter-packet gap, keep the rest of IPG_IFG */
	value = xlr_read_reg(mmio, R_IPG_IFG);
	xlr_write_reg(mmio, R_IPG_IFG, ((value & ~0x7f) | MAC_B2B_IPG));
	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
	xlr_write_reg(mmio, R_STATCTRL, 0x04);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
	xlr_write_reg(mmio, R_INTMASK, 0);
	xlr_write_reg(mmio, R_FREEQCARVE, 0);

	/* message-ring bucket sizes for this port's TX and free-in queues */
	xlr_write_reg(mmio, R_GMAC_TX0_BUCKET_SIZE + port,
	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
	xlr_write_reg(mmio, R_GMAC_JFR0_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
	xlr_write_reg(mmio, R_GMAC_RFR0_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
	xlr_write_reg(mmio, R_GMAC_JFR1_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
	xlr_write_reg(mmio, R_GMAC_RFR1_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);

	dbg_msg("Programming credit counter %d : %d -> %d\n", blk, R_GMAC_TX0_BUCKET_SIZE + port,
	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);

	/* credit counters come from the per-block board configuration */
	gmac_cc_config = xlr_board_info.gmac_block[blk].credit_config;
	for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
		xlr_write_reg(mmio, R_CC_CPU0_0 + i,
		    gmac_cc_config->counters[i >> 3][i & 0x07]);
		dbg_msg("%d: %d -> %d\n", priv->instance,
		    R_CC_CPU0_0 + i, gmac_cc_config->counters[i >> 3][i & 0x07]);
	}
	priv->init_frin_desc = 1;
}

/**********************************************************************
 * Set promiscuous mode
 **********************************************************************/
/*
 * Sync the MAC filter with the interface flags: IFF_PROMISC enables
 * broadcast/pause/all-multicast/all-unicast acceptance; otherwise
 * pause-frame and all-unicast acceptance are cleared.
 */
static void
xlr_mac_set_rx_mode(struct rge_softc *sc)
{
	struct driver_data *priv = &(sc->priv);
	uint32_t regval;

	regval = xlr_read_reg(priv->mmio, R_MAC_FILTER_CONFIG);

	if (sc->flags & IFF_PROMISC) {
		regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
		    (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
		    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
		    (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
	} else {
		regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
		    (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
	}

	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG, regval);
}

/**********************************************************************
 * Configure LAN speed for the specified MAC.
 ********************************************************************* */
/* Stub: speed is actually set in rmi_xlr_gmac_config_speed(). */
static int
rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed)
{
	return 0;
}

/**********************************************************************
 * Set Ethernet duplex and flow control options for this MAC
 ********************************************************************* */
/* Stub: duplex/flow control are fixed by the MAC configuration. */
static int
rmi_xlr_mac_set_duplex(struct driver_data *s,
    xlr_mac_duplex_t duplex, xlr_mac_fc_t fc)
{
	return 0;
}

/*****************************************************************
 * Kernel Net Stack <-> MAC Driver Interface
 *****************************************************************/
/**********************************************************************
 **********************************************************************/
/* mac_xmit() result codes */
#define MAC_TX_FAIL 2
#define MAC_TX_PASS 0
#define MAC_TX_RETRY 1

/*
 * Send a 'size'-entry message to message-ring station 'stid',
 * spinning until the send status no longer reports failure.
 */
static __inline__ void
message_send_block(unsigned int size, unsigned int code,
    unsigned int stid, struct msgrng_msg *msg)
{
	unsigned int dest = 0;
	unsigned long long status = 0;

	msgrng_load_tx_msg0(msg->msg0);
	msgrng_load_tx_msg1(msg->msg1);
	msgrng_load_tx_msg2(msg->msg2);
	msgrng_load_tx_msg3(msg->msg3);

	/* dest encodes size-1, code and station id */
	dest = ((size - 1) << 16) | (code << 8) | (stid);

	do {
		msgrng_send(dest);
		status = msgrng_read_status();
	} while (status & 0x6);

}

int xlr_dev_queue_xmit_hack = 0;

/*
 * Hand one packet to the MAC via the message ring.
 *
 * Returns MAC_TX_PASS on success, or MAC_TX_FAIL when the fragment
 * list cannot be built or the ring send fails (the tx descriptor is
 * released in the latter case).
 */
static int
mac_xmit(struct mbuf *m, struct rge_softc *sc,
    struct driver_data *priv, int len, struct p2d_tx_desc *tx_desc)
{
	struct msgrng_msg msg;
	int stid = priv->txbucket;
	uint32_t tx_cycles = 0;
	unsigned long mflags = 0;
	int vcpu = PCPU_GET(cpuid);
	int rv;

	tx_cycles = mips_rd_count();

	if (build_frag_list(m, &msg, tx_desc) != 0)
		return MAC_TX_FAIL;

	else {
		msgrng_access_enable(mflags);
		if ((rv = message_send_retry(1, MSGRNG_CODE_MAC, stid, &msg)) != 0) {
			msg_snd_failed++;
			msgrng_access_disable(mflags);
			release_tx_desc(&msg, 0);
			xlr_rge_msg_snd_failed[vcpu]++;
			dbg_msg("Failed packet to cpu %d, rv = %d, stid %d, msg0=%llx\n",
			    vcpu, rv, stid, msg.msg0);
			return MAC_TX_FAIL;
		}
		msgrng_access_disable(mflags);
		port_inc_counter(priv->instance, PORT_TX);
	}

	/* Send the packet to MAC */
	dbg_msg("Sent tx packet to stid %d, msg0=%llx, msg1=%llx \n", stid, msg.msg0, msg.msg1);
#ifdef DUMP_PACKETS
	{
		int i = 0;
		unsigned char *buf = (char *)m->m_data;

		printf("Tx Packet: length=%d\n", len);
		for (i = 0; i < 64; i++) {
			if (i && (i % 16) == 0)
				printf("\n");
			printf("%02x ", buf[i]);
		}
		printf("\n");
	}
#endif
	xlr_inc_counter(NETIF_TX);
	return MAC_TX_PASS;
}

/*
 * Transmit entry: retry mac_xmit() until it passes or fails
 * outright; on MAC_TX_FAIL the caller's queue should be stopped.
 */
static int
rmi_xlr_mac_xmit(struct mbuf *m, struct rge_softc *sc, int len, struct p2d_tx_desc *tx_desc)
{
	struct driver_data *priv = &(sc->priv);
	int ret = -ENOSPC;

	dbg_msg("IN\n");

	xlr_inc_counter(NETIF_STACK_TX);

retry:
	ret = mac_xmit(m, sc, priv, len, tx_desc);

	if (ret == MAC_TX_RETRY)
		goto retry;

	dbg_msg("OUT, ret = %d\n", ret);
	if (ret == MAC_TX_FAIL) {
		/* FULL */
		dbg_msg("Msg Ring Full. Stopping upper layer Q\n");
		port_inc_counter(priv->instance, PORT_STOPQ);
	}
	return ret;
}

/*
 * Replenish the MACs' free-in buffer buckets: for every attached
 * MAC, allocate buffers and post them to the MAC until each
 * per-cpu frin_to_be_sent count is drained.
 */
static void
mac_frin_replenish(void *args /* ignored */ )
{
#ifdef RX_COPY
	return;
#else
	int cpu = xlr_cpu_id();
	int done = 0;
	int i = 0;

	xlr_inc_counter(REPLENISH_ENTER);
	/*
	 * xlr_set_counter(REPLENISH_ENTER_COUNT,
	 * atomic_read(frin_to_be_sent));
	 */
	xlr_set_counter(REPLENISH_CPU, PCPU_GET(cpuid));

	for (;;) {

		done = 0;

		for (i = 0; i < XLR_MAX_MACS; i++) {
			/* int offset = 0; */
			unsigned long msgrng_flags;
			void *m;
			uint32_t cycles;
			struct rge_softc *sc;
			struct driver_data *priv;
			int frin_to_be_sent;

			sc = dev_mac[i];
			if (!sc)
				goto skip;

			priv = &(sc->priv);
			frin_to_be_sent = priv->frin_to_be_sent[cpu];

			/* if (atomic_read(frin_to_be_sent) < 0) */
			if (frin_to_be_sent < 0) {
				panic("BUG?: [%s]: gmac_%d illegal value for frin_to_be_sent=%d\n",
				    __FUNCTION__, i,
				    frin_to_be_sent);
			}
			/* if (!atomic_read(frin_to_be_sent)) */
			if (!frin_to_be_sent)
				goto skip;

			cycles = mips_rd_count();
			{
				m = get_buf();
				if (!m) {
					device_printf(sc->rge_dev, "No buffer\n");
					goto skip;
				}
			}
			xlr_inc_counter(REPLENISH_FRIN);
			msgrng_access_enable(msgrng_flags);
			if (xlr_mac_send_fr(priv, vtophys(m), MAX_FRAME_SIZE)) {
				free_buf(vtophys(m));
				printf("[%s]: rx free message_send failed!\n", __FUNCTION__);
				msgrng_access_disable(msgrng_flags);
				break;
			}
			msgrng_access_disable(msgrng_flags);
			xlr_set_counter(REPLENISH_CYCLES,
			    (read_c0_count() - cycles));
			atomic_subtract_int((&priv->frin_to_be_sent[cpu]), 1);

			continue;
	skip:
			done++;
		}
		/* exit once every MAC slot was skipped in a full pass */
		if (done == XLR_MAX_MACS)
			break;
	}
#endif
}

/* non-zero while a deferred TX restart (rge_tx_bkp_func) is pending */
static volatile uint32_t g_tx_frm_tx_ok;

/* Deferred TX restart: kick the transmit queue of every active port. */
static void
rge_tx_bkp_func(void *arg, int npending)
{
	int i = 0;

	for (i = 0; i < xlr_board_info.gmacports; i++) {
		if (!dev_mac[i] || !dev_mac[i]->active)
			continue;
		rge_start_locked(dev_mac[i]->rge_ifp, RGE_TX_THRESHOLD);
	}
	atomic_subtract_int(&g_tx_frm_tx_ok, 1);
}

/* This function is called from an interrupt handler */
/*
 * Message-ring receive handler.  A zero-length message is a TX-done
 * (free-descriptor) notification; a non-zero length is a received
 * packet which is handed to rge_rx().
 */
void
rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
    int stid, struct msgrng_msg *msg,
    void *data /* ignored */ )
{
	uint64_t phys_addr = 0;
	unsigned long addr = 0;
	uint32_t length = 0;
	int ctrl = 0, port = 0;
	struct rge_softc *sc = NULL;
	struct driver_data *priv = 0;
	struct ifnet *ifp;
	int cpu = xlr_cpu_id();
	int vcpu = (cpu << 2) + xlr_thr_id();

	dbg_msg("mac: bucket=%d, size=%d, code=%d, stid=%d, msg0=%llx msg1=%llx\n",
	    bucket, size, code, stid, msg->msg0, msg->msg1);

	/* msg0: bits 39:5 buffer address, bits 53:40 length */
	phys_addr = (uint64_t) (msg->msg0 & 0xffffffffe0ULL);
	length = (msg->msg0 >> 40) & 0x3fff;
	if (length == 0) {
		/* TX done: port is in bits 57:54 */
		ctrl = CTRL_REG_FREE;
		port = (msg->msg0 >> 54) & 0x0f;
		addr = 0;
	} else {
		/* RX packet: strip front pad and trailing CRC from length */
		ctrl = CTRL_SNGL;
		length = length - BYTE_OFFSET - MAC_CRC_LEN;
		port = msg->msg0 & 0x0f;
		addr = 0;
	}

	/* map station id + port number to the owning softc */
	if (xlr_board_info.is_xls) {
		if (stid == MSGRNG_STNID_GMAC1)
			port += 4;
		sc = dev_mac[dev_mac_gmac0 + port];
	} else {
		if (stid == MSGRNG_STNID_XGS0FR)
			sc = dev_mac[dev_mac_xgs0];
		else if (stid == MSGRNG_STNID_XGS1FR)
			sc = dev_mac[dev_mac_xgs0 + 1];
		else
			sc = dev_mac[dev_mac_gmac0 + port];
	}
	if (sc == NULL)
		return;
	priv = &(sc->priv);

	dbg_msg("msg0 = %llx, stid = %d, port = %d, addr=%lx, length=%d, ctrl=%d\n",
	    msg->msg0, stid, port, addr, length, ctrl);

	if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
		/* TX completion: release descriptor, un-stall the queue */
		xlr_rge_tx_ok_done[vcpu]++;
		release_tx_desc(msg, 1);
		ifp = sc->rge_ifp;
		if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		/* only one CPU at a time runs the deferred restart */
		if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
			rge_tx_bkp_func(NULL, 0);
		xlr_set_counter(NETIF_TX_COMPLETE_CYCLES,
		    (read_c0_count() - msgrng_msg_cycles));
	} else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
		/* Rx Packet */
		/* struct mbuf *m = 0; */
		/* int logical_cpu = 0; */

		dbg_msg("Received packet, port = %d\n", port);
		/*
		 * if num frins to be sent exceeds threshold, wake up the
		 * helper thread
		 */
		atomic_add_int(&(priv->frin_to_be_sent[cpu]), 1);
		if ((priv->frin_to_be_sent[cpu]) > MAC_FRIN_TO_BE_SENT_THRESHOLD) {
			mac_frin_replenish(NULL);
		}
		dbg_msg("gmac_%d: rx packet: phys_addr = %llx, length = %x\n",
		    priv->instance, phys_addr, length);
		mac_stats_add(priv->stats.rx_packets, 1);
		mac_stats_add(priv->stats.rx_bytes, length);
		xlr_inc_counter(NETIF_RX);
		xlr_set_counter(NETIF_RX_CYCLES,
		    (read_c0_count() - msgrng_msg_cycles));
		rge_rx(sc, phys_addr, length);
		xlr_rge_rx_done[vcpu]++;
	} else {
		printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
	}

}

/**********************************************************************
 **********************************************************************/
/* Probe: this pseudo-bus device always attaches. */
static int
rge_probe(dev)
	device_t dev;
{
	/* Always return 0 */
	return 0;
}

/* debug knob and callout for periodic register dumps (normally off) */
volatile unsigned long xlr_debug_enabled;
struct callout rge_dbg_count;
/* Periodic debug callout: print port 0's register 0x23e (AvailRxIn). */
static void
xlr_debug_count(void *addr)
{
	struct
driver_data *priv = &dev_mac[0]->priv; 1812 1813 /* uint32_t crdt; */ 1814 if (xlr_debug_enabled) { 1815 printf("\nAvailRxIn %#x\n", xlr_read_reg(priv->mmio, 0x23e)); 1816 } 1817 callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL); 1818} 1819 1820 1821static void 1822xlr_tx_q_wakeup(void *addr) 1823{ 1824 int i = 0; 1825 int j = 0; 1826 1827 for (i = 0; i < xlr_board_info.gmacports; i++) { 1828 if (!dev_mac[i] || !dev_mac[i]->active) 1829 continue; 1830 if ((dev_mac[i]->rge_ifp->if_drv_flags) & IFF_DRV_OACTIVE) { 1831 for (j = 0; j < XLR_MAX_CORE; j++) { 1832 if (xlr_tot_avail_p2d[j]) { 1833 dev_mac[i]->rge_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1834 break; 1835 } 1836 } 1837 } 1838 } 1839 callout_reset(&xlr_tx_stop_bkp, 5 * hz, xlr_tx_q_wakeup, NULL); 1840} 1841 1842static int 1843rge_attach(device_t dev) 1844{ 1845 struct ifnet *ifp; 1846 struct rge_softc *sc; 1847 struct driver_data *priv = 0; 1848 int ret = 0; 1849 struct xlr_gmac_block_t *gmac_conf = device_get_ivars(dev); 1850 1851 sc = device_get_softc(dev); 1852 sc->rge_dev = dev; 1853 1854 /* Initialize mac's */ 1855 sc->unit = device_get_unit(dev); 1856 1857 if (sc->unit > XLR_MAX_MACS) { 1858 ret = ENXIO; 1859 goto out; 1860 } 1861 RGE_LOCK_INIT(sc, device_get_nameunit(dev)); 1862 1863 priv = &(sc->priv); 1864 priv->sc = sc; 1865 1866 sc->flags = 0; /* TODO : fix me up later */ 1867 1868 priv->id = sc->unit; 1869 if (gmac_conf->type == XLR_GMAC) { 1870 priv->instance = priv->id; 1871 priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr + 1872 0x1000 * (sc->unit % 4)); 1873 if ((ret = rmi_xlr_gmac_reset(priv)) == -1) 1874 goto out; 1875 } else if (gmac_conf->type == XLR_XGMAC) { 1876 priv->instance = priv->id - xlr_board_info.gmacports; 1877 priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr); 1878 } 1879 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_VI) { 1880 dbg_msg("Arizona board - offset 4 \n"); 1881 priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + 
XLR_IO_GMAC_4_OFFSET); 1882 } else 1883 priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET); 1884 1885 priv->pcs_mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr); 1886 priv->serdes_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET); 1887 1888 sc->base_addr = (unsigned long)priv->mmio; 1889 sc->mem_end = (unsigned long)priv->mmio + XLR_IO_SIZE - 1; 1890 1891 sc->xmit = rge_start; 1892 sc->stop = rge_stop; 1893 sc->get_stats = rmi_xlr_mac_get_stats; 1894 sc->ioctl = rge_ioctl; 1895 1896 /* Initialize the device specific driver data */ 1897 mtx_init(&priv->lock, "rge", NULL, MTX_SPIN); 1898 1899 priv->type = gmac_conf->type; 1900 1901 priv->mode = gmac_conf->mode; 1902 if (xlr_board_info.is_xls == 0) { 1903 if (xlr_board_atx_ii() && !xlr_board_atx_ii_b()) 1904 priv->phy_addr = priv->instance - 2; 1905 else 1906 priv->phy_addr = priv->instance; 1907 priv->mode = XLR_RGMII; 1908 } else { 1909 if (gmac_conf->mode == XLR_PORT0_RGMII && 1910 priv->instance == 0) { 1911 priv->mode = XLR_PORT0_RGMII; 1912 priv->phy_addr = 0; 1913 } else { 1914 priv->mode = XLR_SGMII; 1915 priv->phy_addr = priv->instance + 16; 1916 } 1917 } 1918 1919 priv->txbucket = gmac_conf->station_txbase + priv->instance % 4; 1920 priv->rfrbucket = gmac_conf->station_rfr; 1921 priv->spill_configured = 0; 1922 1923 dbg_msg("priv->mmio=%p\n", priv->mmio); 1924 1925 /* Set up ifnet structure */ 1926 ifp = sc->rge_ifp = if_alloc(IFT_ETHER); 1927 if (ifp == NULL) { 1928 device_printf(sc->rge_dev, "failed to if_alloc()\n"); 1929 rge_release_resources(sc); 1930 ret = ENXIO; 1931 RGE_LOCK_DESTROY(sc); 1932 goto out; 1933 } 1934 ifp->if_softc = sc; 1935 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1936 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1937 ifp->if_ioctl = rge_ioctl; 1938 ifp->if_start = rge_start; 1939 ifp->if_watchdog = rge_watchdog; 1940 ifp->if_init = rge_init; 1941 ifp->if_mtu = ETHERMTU; 1942 ifp->if_snd.ifq_drv_maxlen = 
RGE_TX_Q_SIZE; 1943 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 1944 IFQ_SET_READY(&ifp->if_snd); 1945 sc->active = 1; 1946 ifp->if_hwassist = 0; 1947 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING; 1948 ifp->if_capenable = ifp->if_capabilities; 1949 1950 /* Initialize the rge_softc */ 1951 sc->irq = gmac_conf->baseirq + priv->instance % 4; 1952 1953 /* Set the IRQ into the rid field */ 1954 /* 1955 * note this is a hack to pass the irq to the iodi interrupt setup 1956 * routines 1957 */ 1958 sc->rge_irq.__r_i = (struct resource_i *)sc->irq; 1959 1960 ret = bus_setup_intr(dev, &sc->rge_irq, INTR_FAST | INTR_TYPE_NET | INTR_MPSAFE, 1961 NULL, rge_intr, sc, &sc->rge_intrhand); 1962 1963 if (ret) { 1964 rge_detach(dev); 1965 device_printf(sc->rge_dev, "couldn't set up irq\n"); 1966 RGE_LOCK_DESTROY(sc); 1967 goto out; 1968 } 1969 xlr_mac_get_hwaddr(sc); 1970 xlr_mac_setup_hwaddr(priv); 1971 1972 dbg_msg("MMIO %08lx, MII %08lx, PCS %08lx, base %08lx PHY %d IRQ %d\n", 1973 (u_long)priv->mmio, (u_long)priv->mii_mmio, (u_long)priv->pcs_mmio, 1974 (u_long)sc->base_addr, priv->phy_addr, sc->irq); 1975 dbg_msg("HWADDR %02x:%02x tx %d rfr %d\n", (u_int)sc->dev_addr[4], 1976 (u_int)sc->dev_addr[5], priv->txbucket, priv->rfrbucket); 1977 1978 /* 1979 * Set up ifmedia support. 1980 */ 1981 /* 1982 * Initialize MII/media info. 1983 */ 1984 sc->rge_mii.mii_ifp = ifp; 1985 sc->rge_mii.mii_readreg = rge_mii_read; 1986 sc->rge_mii.mii_writereg = (mii_writereg_t) rge_mii_write; 1987 sc->rge_mii.mii_statchg = rmi_xlr_mac_mii_statchg; 1988 ifmedia_init(&sc->rge_mii.mii_media, 0, rmi_xlr_mac_mediachange, 1989 rmi_xlr_mac_mediastatus); 1990 ifmedia_add(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL); 1991 ifmedia_set(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO); 1992 sc->rge_mii.mii_media.ifm_media = sc->rge_mii.mii_media.ifm_cur->ifm_media; 1993 1994 /* 1995 * Call MI attach routine. 
1996 */ 1997 ether_ifattach(ifp, sc->dev_addr); 1998 1999 if (priv->type == XLR_GMAC) { 2000 rmi_xlr_gmac_init(priv); 2001 } else if (priv->type == XLR_XGMAC) { 2002 rmi_xlr_xgmac_init(priv); 2003 } 2004 dbg_msg("rge_%d: Phoenix Mac at 0x%p (mtu=%d)\n", 2005 sc->unit, priv->mmio, sc->mtu); 2006 dev_mac[sc->unit] = sc; 2007 if (priv->type == XLR_XGMAC && priv->instance == 0) 2008 dev_mac_xgs0 = sc->unit; 2009 if (priv->type == XLR_GMAC && priv->instance == 0) 2010 dev_mac_gmac0 = sc->unit; 2011 2012 if (!gmac_common_init_done) { 2013 mac_common_init(); 2014 gmac_common_init_done = 1; 2015 callout_init(&xlr_tx_stop_bkp, CALLOUT_MPSAFE); 2016 callout_reset(&xlr_tx_stop_bkp, hz, xlr_tx_q_wakeup, NULL); 2017 callout_init(&rge_dbg_count, CALLOUT_MPSAFE); 2018 //callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL); 2019 } 2020 if ((ret = rmi_xlr_mac_open(sc)) == -1) { 2021 RGE_LOCK_DESTROY(sc); 2022 goto out; 2023 } 2024out: 2025 if (ret < 0) { 2026 device_printf(dev, "error - skipping\n"); 2027 } 2028 return ret; 2029} 2030 2031static void 2032rge_reset(struct rge_softc *sc) 2033{ 2034} 2035 2036static int 2037rge_detach(dev) 2038 device_t dev; 2039{ 2040#ifdef FREEBSD_MAC_NOT_YET 2041 struct rge_softc *sc; 2042 struct ifnet *ifp; 2043 2044 sc = device_get_softc(dev); 2045 ifp = sc->rge_ifp; 2046 2047 RGE_LOCK(sc); 2048 rge_stop(sc); 2049 rge_reset(sc); 2050 RGE_UNLOCK(sc); 2051 2052 ether_ifdetach(ifp); 2053 2054 if (sc->rge_tbi) { 2055 ifmedia_removeall(&sc->rge_ifmedia); 2056 } else { 2057 bus_generic_detach(dev); 2058 device_delete_child(dev, sc->rge_miibus); 2059 } 2060 2061 rge_release_resources(sc); 2062 2063#endif /* FREEBSD_MAC_NOT_YET */ 2064 return (0); 2065} 2066static int 2067rge_suspend(device_t dev) 2068{ 2069 struct rge_softc *sc; 2070 2071 sc = device_get_softc(dev); 2072 RGE_LOCK(sc); 2073 rge_stop(sc); 2074 RGE_UNLOCK(sc); 2075 2076 return 0; 2077} 2078 2079static int 2080rge_resume(device_t dev) 2081{ 2082 panic("rge_resume(): unimplemented\n"); 
	return 0;
}

/* Free the ifnet and destroy the softc mutex, if initialized. */
static void
rge_release_resources(struct rge_softc *sc)
{

	if (sc->rge_ifp != NULL)
		if_free(sc->rge_ifp);

	if (mtx_initialized(&sc->rge_mtx))	/* XXX */
		RGE_LOCK_DESTROY(sc);
}
/* per-cpu counts of mbuf allocation failures/successes in rge_rx() */
uint32_t gmac_rx_fail[32];
uint32_t gmac_rx_pass[32];

#ifdef RX_COPY
/*
 * RX_COPY receive path: copy the received frame into a fresh mbuf
 * cluster, hand the DMA buffer straight back to the MAC's free-in
 * bucket, then pass the mbuf up the stack.
 */
static void
rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
{
	/*
	 * struct mbuf *m = (struct mbuf *)*(unsigned int *)((char *)addr -
	 * XLR_CACHELINE_SIZE);
	 */
	struct mbuf *m;
	void *ptr;
	uint32_t *temp;
	struct ifnet *ifp = sc->rge_ifp;
	unsigned long msgrng_flags;
	int cpu = PCPU_GET(cpuid);


	temp = (uint32_t *) MIPS_PHYS_TO_KSEG0(paddr - XLR_CACHELINE_SIZE);

	ptr = (void *)(temp + XLR_CACHELINE_SIZE);
	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL) {
		m->m_len = m->m_pkthdr.len = MCLBYTES;
		m_copyback(m, 0, len + BYTE_OFFSET, ptr);
		/* align the data */
		m->m_data += BYTE_OFFSET;
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;
		gmac_rx_pass[cpu]++;
	} else {
		gmac_rx_fail[cpu]++;
	}
	/* return the DMA buffer to the MAC's free-in bucket */
	msgrng_access_enable(msgrng_flags);
	xlr_mac_send_fr(&sc->priv, paddr, MAX_FRAME_SIZE);
	msgrng_access_disable(msgrng_flags);

#ifdef DUMP_PACKETS
	{
		int i = 0;
		unsigned char *buf = (char *)m->m_data;

		printf("Rx Packet: length=%d\n", len);
		for (i = 0; i < 64; i++) {
			if (i && (i % 16) == 0)
				printf("\n");
			printf("%02x ", buf[i]);
		}
		printf("\n");
	}
#endif


	if (m) {
		ifp->if_ipackets++;
		(*ifp->if_input) (ifp, m);
	}
}

#else
/*
 * Zero-copy receive path: recover the mbuf pointer stashed in the
 * cacheline preceding the DMA buffer (validated by the 0xf00bad
 * magic word) and pass that mbuf up the stack directly.
 */
static void
rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
{
	/*
	 * struct mbuf *m = (struct mbuf *)*(unsigned int *)((char *)addr -
	 * XLR_CACHELINE_SIZE);
	 */
	struct mbuf *m;
	uint32_t *temp, tm, mag;

	struct ifnet *ifp = sc->rge_ifp;


	temp = (uint32_t *) MIPS_PHYS_TO_KSEG0(paddr - XLR_CACHELINE_SIZE);
	tm = temp[0];
	mag = temp[1];
	m = (struct mbuf *)tm;
	if (mag != 0xf00bad) {
		/* somebody else packet Error - FIXME in intialization */
		printf("cpu %d: *ERROR* Not my packet paddr %p\n", xlr_cpu_id(), (void *)paddr);
		return;
	}
	/* align the data */
	m->m_data += BYTE_OFFSET;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = ifp;

#ifdef DUMP_PACKETS
	{
		int i = 0;
		unsigned char *buf = (char *)m->m_data;

		printf("Rx Packet: length=%d\n", len);
		for (i = 0; i < 64; i++) {
			if (i && (i % 16) == 0)
				printf("\n");
			printf("%02x ", buf[i]);
		}
		printf("\n");
	}
#endif
	ifp->if_ipackets++;
	(*ifp->if_input) (ifp, m);
}

#endif

/*
 * MAC interrupt handler: an MDIO interrupt triggers a link/speed
 * re-evaluation of every GMAC PHY; any other cause is reported as
 * an error.  All interrupt bits are acknowledged afterwards.
 */
static void
rge_intr(void *arg)
{
	struct rge_softc *sc = (struct rge_softc *)arg;
	struct driver_data *priv = &(sc->priv);
	xlr_reg_t *mmio = priv->mmio;
	uint32_t intreg = xlr_read_reg(mmio, R_INTREG);

	if (intreg & (1 << O_INTREG__MDInt)) {
		uint32_t phy_int_status = 0;
		int i = 0;

		for (i = 0; i < XLR_MAX_MACS; i++) {
			struct rge_softc *phy_dev = 0;
			struct driver_data *phy_priv = 0;

			phy_dev = dev_mac[i];
			if (phy_dev == NULL)
				continue;

			phy_priv = &phy_dev->priv;

			if (phy_priv->type == XLR_XGMAC)
				continue;

			/*
			 * read PHY reg 26 (presumably the interrupt
			 * status register, which would also ack the
			 * interrupt - confirm with the PHY datasheet)
			 */
			phy_int_status = rge_mii_read_internal(phy_priv->mii_mmio,
			    phy_priv->phy_addr, 26);
			printf("rge%d: Phy addr %d, MII MMIO %lx status %x\n", phy_priv->instance,
			    (int)phy_priv->phy_addr, (u_long)phy_priv->mii_mmio, phy_int_status);
			rmi_xlr_gmac_config_speed(phy_priv);
		}
	} else {
		printf("[%s]: mac type = %d, instance %d error "
		    "interrupt: INTREG = 0x%08x\n",
		    __FUNCTION__, priv->type, priv->instance, intreg);
	}

	/*
 clear all interrupts and hope to make progress */
	xlr_write_reg(mmio, R_INTREG, 0xffffffff);

	/* on A0 and B0, xgmac interrupts are routed only to xgs_1 irq */
	if ((xlr_revision_b0()) && (priv->type == XLR_XGMAC)) {
		struct rge_softc *xgs0_dev = dev_mac[dev_mac_xgs0];
		struct driver_data *xgs0_priv = &xgs0_dev->priv;
		xlr_reg_t *xgs0_mmio = xgs0_priv->mmio;
		uint32_t xgs0_intreg = xlr_read_reg(xgs0_mmio, R_INTREG);

		if (xgs0_intreg) {
			printf("[%s]: mac type = %d, instance %d error "
			    "interrupt: INTREG = 0x%08x\n",
			    __FUNCTION__, xgs0_priv->type, xgs0_priv->instance, xgs0_intreg);

			/* Acknowledge the xgs0 interrupts as well. */
			xlr_write_reg(xgs0_mmio, R_INTREG, 0xffffffff);
		}
	}
}

/*
 * Dequeue packets from the interface send queue and hand them to the
 * MAC, one P2D descriptor per packet.  At most xlr_tot_avail_p2d[cpu]
 * packets are sent per call; 'threshold' is currently unused.
 */
static void
rge_start_locked(struct ifnet *ifp, int threshold)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	int prepend_pkt = 0;
	int i = 0;
	struct p2d_tx_desc *tx_desc = NULL;
	int cpu = xlr_cpu_id();
	uint32_t vcpu = (cpu << 2) + xlr_thr_id();

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (i = 0; i < xlr_tot_avail_p2d[cpu]; i++) {
		if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			return;
		/* Reserve a descriptor before dequeuing the packet. */
		tx_desc = get_p2d_desc();
		if (!tx_desc) {
			xlr_rge_get_p2d_failed[vcpu]++;
			return;
		}
		/* Grab a packet off the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			free_p2d_desc(tx_desc);
			return;
		}
		prepend_pkt = rmi_xlr_mac_xmit(m, sc, 0, tx_desc);

		if (prepend_pkt) {
			/* Transmit failed: requeue the mbuf and back off. */
			xlr_rge_tx_prepend[vcpu]++;
			IF_PREPEND(&ifp->if_snd, m);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			return;
		} else {
			ifp->if_opackets++;
			xlr_rge_tx_done[vcpu]++;
		}
	}
}

/*
 * ifnet if_start entry point.
 */
static void
rge_start(struct ifnet *ifp)
{
	rge_start_locked(ifp, RGE_TX_Q_SIZE);
}

/*
 * ifnet ioctl entry point: MTU, interface flags, multicast list,
 * media and capability requests.
 */
static int
rge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;

	/* struct mii_data *mii; */
	switch (command) {
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		error = rmi_xlr_mac_change_mtu(sc, ifr->ifr_mtu);
		break;
	case SIOCSIFFLAGS:

		RGE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing a
			 * full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two. Similarly for ALLMULTI.
2333 */ 2334 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 2335 ifp->if_flags & IFF_PROMISC && 2336 !(sc->flags & IFF_PROMISC)) { 2337 sc->flags |= IFF_PROMISC; 2338 xlr_mac_set_rx_mode(sc); 2339 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && 2340 !(ifp->if_flags & IFF_PROMISC) && 2341 sc->flags & IFF_PROMISC) { 2342 sc->flags &= IFF_PROMISC; 2343 xlr_mac_set_rx_mode(sc); 2344 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && 2345 (ifp->if_flags ^ sc->flags) & IFF_ALLMULTI) { 2346 rmi_xlr_mac_set_multicast_list(sc); 2347 } else 2348 xlr_mac_set_rx_mode(sc); 2349 } else { 2350 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2351 xlr_mac_set_rx_mode(sc); 2352 } 2353 } 2354 sc->flags = ifp->if_flags; 2355 RGE_UNLOCK(sc); 2356 error = 0; 2357 break; 2358 case SIOCADDMULTI: 2359 case SIOCDELMULTI: 2360 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2361 RGE_LOCK(sc); 2362 rmi_xlr_mac_set_multicast_list(sc); 2363 RGE_UNLOCK(sc); 2364 error = 0; 2365 } 2366 break; 2367 case SIOCSIFMEDIA: 2368 case SIOCGIFMEDIA: 2369 error = ifmedia_ioctl(ifp, ifr, 2370 &sc->rge_mii.mii_media, command); 2371 break; 2372 case SIOCSIFCAP: 2373 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2374 ifp->if_hwassist = 0; 2375 break; 2376 default: 2377 error = ether_ioctl(ifp, command, data); 2378 break; 2379 } 2380 2381 return (error); 2382} 2383 2384static void 2385rge_init(void *addr) 2386{ 2387 struct rge_softc *sc = (struct rge_softc *)addr; 2388 struct ifnet *ifp; 2389 struct driver_data *priv = &(sc->priv); 2390 2391 ifp = sc->rge_ifp; 2392 2393 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2394 return; 2395 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2396 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2397 2398 rmi_xlr_mac_set_enable(priv, 1); 2399} 2400 2401static void 2402rge_stop(struct rge_softc *sc) 2403{ 2404 rmi_xlr_mac_close(sc); 2405} 2406 2407static void 2408rge_watchdog(struct ifnet *sc) 2409{ 2410} 2411 2412static int 2413rge_shutdown(device_t dev) 2414{ 2415 struct rge_softc *sc; 2416 2417 sc = 
device_get_softc(dev);

	RGE_LOCK(sc);
	rge_stop(sc);
	rge_reset(sc);
	RGE_UNLOCK(sc);

	return (0);
}

/*
 * Bring the MAC up: populate the RX free-in ring, program the RX
 * filter and speed/duplex, and enable MDIO interrupts once the last
 * GMAC port has been opened.  Returns 0 on success, -1 if the RX
 * free-in ring could not be filled.
 */
static int
rmi_xlr_mac_open(struct rge_softc *sc)
{
	struct driver_data *priv = &(sc->priv);
	int i;

	dbg_msg("IN\n");

	if (rmi_xlr_mac_fill_rxfr(sc)) {
		return -1;
	}
	mtx_lock_spin(&priv->lock);

	xlr_mac_set_rx_mode(sc);

	if (sc->unit == xlr_board_info.gmacports - 1) {
		printf("Enabling MDIO interrupts\n");
		struct rge_softc *tmp = NULL;

		/* Route MDInt only through instance 0 of each port. */
		for (i = 0; i < xlr_board_info.gmacports; i++) {
			tmp = dev_mac[i];
			if (tmp)
				xlr_write_reg(tmp->priv.mmio, R_INTMASK,
				    ((tmp->priv.instance == 0) << O_INTMASK__MDInt));
		}
	}
	/*
	 * Configure the speed, duplex, and flow control
	 */
	rmi_xlr_mac_set_speed(priv, priv->speed);
	rmi_xlr_mac_set_duplex(priv, priv->duplex, priv->flow_ctrl);
	rmi_xlr_mac_set_enable(priv, 0);

	mtx_unlock_spin(&priv->lock);

	/* Reset the per-bucket "frin to be sent" counters. */
	for (i = 0; i < 8; i++) {
		atomic_set_int(&(priv->frin_to_be_sent[i]), 0);
	}

	return 0;
}

/**********************************************************************
 **********************************************************************/
/*
 * Bring the MAC down.  Buffers left in the rings are reused on the
 * next open.  Always returns 0.
 */
static int
rmi_xlr_mac_close(struct rge_softc *sc)
{
	struct driver_data *priv = &(sc->priv);

	mtx_lock_spin(&priv->lock);

	/*
	 * There may have left over mbufs in the ring as well as in free in
	 * they will be reused next time open is called
	 */

	rmi_xlr_mac_set_enable(priv, 0);

	xlr_inc_counter(NETIF_STOP_Q);
	port_inc_counter(priv->instance, PORT_STOPQ);

	mtx_unlock_spin(&priv->lock);

	return 0;
}

/**********************************************************************
 **********************************************************************/
/*
 * Return a pointer to the driver statistics, serialized by the driver
 * spin lock.
 */
static struct rge_softc_stats *
rmi_xlr_mac_get_stats(struct rge_softc *sc)
{
	struct driver_data *priv = &(sc->priv);

	/* unsigned long flags; */

	mtx_lock_spin(&priv->lock);

	/* XXX update other stats here */

	mtx_unlock_spin(&priv->lock);

	return &priv->stats;
}

/**********************************************************************
 **********************************************************************/
/*
 * Program the hardware multicast filter; not implemented yet.
 */
static void
rmi_xlr_mac_set_multicast_list(struct rge_softc *sc)
{
}

/**********************************************************************
 **********************************************************************/
/*
 * Change the interface MTU.  Valid range is 64..9500; the MAC is
 * briefly disabled while the new size takes effect.
 */
static int
rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu)
{
	struct driver_data *priv = &(sc->priv);

	if ((new_mtu > 9500) || (new_mtu < 64)) {
		return -EINVAL;
	}
	mtx_lock_spin(&priv->lock);

	sc->mtu = new_mtu;

	/* Disable MAC TX/RX */
	rmi_xlr_mac_set_enable(priv, 0);

	/* Flush RX FR IN */
	/* Flush TX IN */
	rmi_xlr_mac_set_enable(priv, 1);

	mtx_unlock_spin(&priv->lock);
	return 0;
}

/**********************************************************************
 **********************************************************************/
/*
 * Fill the MAC free-in ring with freshly allocated receive buffers.
 * Only done on the first open (guarded by init_frin_desc).  Returns 0
 * on success or -ENOMEM if an allocation failed part-way.
 */
static int
rmi_xlr_mac_fill_rxfr(struct rge_softc *sc)
{
	struct driver_data *priv = &(sc->priv);
	unsigned long msgrng_flags;
	int i;
	int ret = 0;
	void *ptr;

	dbg_msg("\n");
	if (!priv->init_frin_desc)
		return ret;
	priv->init_frin_desc = 0;

	dbg_msg("\n");
	for (i = 0; i < MAX_NUM_DESC; i++) {
		ptr = get_buf();
		if (!ptr) {
			ret = -ENOMEM;
			break;
		}
		/* Send the free Rx desc to the MAC */
		msgrng_access_enable(msgrng_flags);
		xlr_mac_send_fr(priv, vtophys(ptr), MAX_FRAME_SIZE);
		msgrng_access_disable(msgrng_flags);
	}

	return ret;
}

/**********************************************************************
 **********************************************************************/
/*
 * Allocate one cache-line aligned spill area and program its physical
 * address (split across two registers) and size into the MAC.  Panics
 * on allocation failure.  Returns the KVA of the spill area.
 */
static __inline__ void *
rmi_xlr_config_spill(xlr_reg_t * mmio,
    int reg_start_0, int reg_start_1,
    int reg_size, int size)
{
	uint32_t spill_size = size;
	void *spill = NULL;
	uint64_t phys_addr = 0;


	spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
	if (!spill || ((vm_offset_t)spill & (XLR_CACHELINE_SIZE - 1))) {
		panic("Unable to allocate memory for spill area!\n");
	}
	phys_addr = vtophys(spill);
	dbg_msg("Allocate spill %d bytes at %llx\n", size, phys_addr);
	/* The start registers take the address in two pieces: >>5 and >>37. */
	xlr_write_reg(mmio, reg_start_0, (phys_addr >> 5) & 0xffffffff);
	xlr_write_reg(mmio, reg_start_1, (phys_addr >> 37) & 0x07);
	xlr_write_reg(mmio, reg_size, spill_size);

	return spill;
}

/*
 * Configure all six spill areas (free-in, four RX classes, free-out)
 * for this MAC.  Only the first GMAC of each block of four programs
 * the shared areas; other instances just mark themselves configured.
 */
static void
rmi_xlr_config_spill_area(struct driver_data *priv)
{
	/*
	 * if driver initialization is done parallely on multiple cpus
	 * spill_configured needs synchronization
	 */
	if (priv->spill_configured)
		return;

	if (priv->type == XLR_GMAC && priv->instance % 4 != 0) {
		priv->spill_configured = 1;
		return;
	}
	priv->spill_configured = 1;

	priv->frin_spill =
	    rmi_xlr_config_spill(priv->mmio,
	    R_REG_FRIN_SPILL_MEM_START_0,
	    R_REG_FRIN_SPILL_MEM_START_1,
	    R_REG_FRIN_SPILL_MEM_SIZE,
	    MAX_FRIN_SPILL *
	    sizeof(struct fr_desc));

	priv->class_0_spill =
	    rmi_xlr_config_spill(priv->mmio,
	    R_CLASS0_SPILL_MEM_START_0,
	    R_CLASS0_SPILL_MEM_START_1,
	    R_CLASS0_SPILL_MEM_SIZE,
	    MAX_CLASS_0_SPILL *
	    sizeof(union rx_tx_desc));
	priv->class_1_spill =
	    rmi_xlr_config_spill(priv->mmio,
	    R_CLASS1_SPILL_MEM_START_0,
	    R_CLASS1_SPILL_MEM_START_1,
	    R_CLASS1_SPILL_MEM_SIZE,
	    MAX_CLASS_1_SPILL *
	    sizeof(union rx_tx_desc));

	priv->frout_spill =
	    rmi_xlr_config_spill(priv->mmio, R_FROUT_SPILL_MEM_START_0,
	    R_FROUT_SPILL_MEM_START_1,
	    R_FROUT_SPILL_MEM_SIZE,
	    MAX_FROUT_SPILL *
	    sizeof(struct fr_desc));

	priv->class_2_spill =
	    rmi_xlr_config_spill(priv->mmio,
	    R_CLASS2_SPILL_MEM_START_0,
	    R_CLASS2_SPILL_MEM_START_1,
	    R_CLASS2_SPILL_MEM_SIZE,
	    MAX_CLASS_2_SPILL *
	    sizeof(union rx_tx_desc));
	priv->class_3_spill =
	    rmi_xlr_config_spill(priv->mmio,
	    R_CLASS3_SPILL_MEM_START_0,
	    R_CLASS3_SPILL_MEM_START_1,
	    R_CLASS3_SPILL_MEM_SIZE,
	    MAX_CLASS_3_SPILL *
	    sizeof(union rx_tx_desc));
	priv->spill_configured = 1;
}

/*****************************************************************
 * Write the MAC address to the XLR registers
 * All 4 addresses are the same for now
 *****************************************************************/
static void
xlr_mac_setup_hwaddr(struct driver_data *priv)
{
	struct rge_softc *sc = priv->sc;

	xlr_write_reg(priv->mmio, R_MAC_ADDR0,
	    ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16)
	    | (sc->dev_addr[3] << 8) | (sc->dev_addr[2]))
	    );

	xlr_write_reg(priv->mmio, R_MAC_ADDR0 + 1,
	    ((sc->dev_addr[1] << 24) | (sc->dev_addr[0] << 16)));

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2 + 1, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3 + 1, 0xffffffff);

	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG,
	    (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
	    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
	    (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)
	    );
}

/*****************************************************************
 * Read the MAC
address from the XLR registers 2699 * All 4 addresses are the same for now 2700 *****************************************************************/ 2701static void 2702xlr_mac_get_hwaddr(struct rge_softc *sc) 2703{ 2704 struct driver_data *priv = &(sc->priv); 2705 2706 sc->dev_addr[0] = (xlr_boot1_info.mac_addr >> 40) & 0xff; 2707 sc->dev_addr[1] = (xlr_boot1_info.mac_addr >> 32) & 0xff; 2708 sc->dev_addr[2] = (xlr_boot1_info.mac_addr >> 24) & 0xff; 2709 sc->dev_addr[3] = (xlr_boot1_info.mac_addr >> 16) & 0xff; 2710 sc->dev_addr[4] = (xlr_boot1_info.mac_addr >> 8) & 0xff; 2711 sc->dev_addr[5] = ((xlr_boot1_info.mac_addr >> 0) & 0xff) + priv->instance; 2712} 2713 2714/***************************************************************** 2715 * Mac Module Initialization 2716 *****************************************************************/ 2717static void 2718mac_common_init(void) 2719{ 2720 init_p2d_allocation(); 2721 init_tx_ring(); 2722#ifdef RX_COPY 2723 init_rx_buf(); 2724#endif 2725 2726 if (xlr_board_info.is_xls) { 2727 if (register_msgring_handler(TX_STN_GMAC0, 2728 rmi_xlr_mac_msgring_handler, NULL)) { 2729 panic("Couldn't register msgring handler\n"); 2730 } 2731 if (register_msgring_handler(TX_STN_GMAC1, 2732 rmi_xlr_mac_msgring_handler, NULL)) { 2733 panic("Couldn't register msgring handler\n"); 2734 } 2735 } else { 2736 if (register_msgring_handler(TX_STN_GMAC, 2737 rmi_xlr_mac_msgring_handler, NULL)) { 2738 panic("Couldn't register msgring handler\n"); 2739 } 2740 } 2741 2742 /* 2743 * Not yet if (xlr_board_atx_ii()) { if (register_msgring_handler 2744 * (TX_STN_XGS_0, rmi_xlr_mac_msgring_handler, NULL)) { 2745 * panic("Couldn't register msgring handler for TX_STN_XGS_0\n"); } 2746 * if (register_msgring_handler (TX_STN_XGS_1, 2747 * rmi_xlr_mac_msgring_handler, NULL)) { panic("Couldn't register 2748 * msgring handler for TX_STN_XGS_1\n"); } } 2749 */ 2750} 2751