1/*- 2 * Copyright (c) 2003-2012 Broadcom Corporation 3 * All Rights Reserved 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in 13 * the documentation and/or other materials provided with the 14 * distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 23 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 25 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 26 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD: releng/10.3/sys/mips/nlm/dev/net/nae.c 261455 2014-02-04 03:36:42Z eadler $"); 31#include <sys/types.h> 32#include <sys/systm.h> 33 34#include <mips/nlm/hal/mips-extns.h> 35#include <mips/nlm/hal/haldefs.h> 36#include <mips/nlm/hal/iomap.h> 37#include <mips/nlm/hal/sys.h> 38#include <mips/nlm/hal/nae.h> 39#include <mips/nlm/hal/mdio.h> 40#include <mips/nlm/hal/sgmii.h> 41#include <mips/nlm/hal/xaui.h> 42 43#include <mips/nlm/board.h> 44#include <mips/nlm/xlp.h> 45 46void 47nlm_nae_flush_free_fifo(uint64_t nae_base, int nblocks) 48{ 49 uint32_t data, fifo_mask; 50 51 fifo_mask = (1 << (4 * nblocks)) - 1; 52 53 nlm_write_nae_reg(nae_base, NAE_RX_FREE_FIFO_POP, fifo_mask); 54 do { 55 data = nlm_read_nae_reg(nae_base, NAE_RX_FREE_FIFO_POP); 56 } while (data != fifo_mask); 57 58 nlm_write_nae_reg(nae_base, NAE_RX_FREE_FIFO_POP, 0); 59} 60 61void 62nlm_program_nae_parser_seq_fifo(uint64_t nae_base, int maxports, 63 struct nae_port_config *cfg) 64{ 65 uint32_t val; 66 int start = 0, size, i; 67 68 for (i = 0; i < maxports; i++) { 69 size = cfg[i].pseq_fifo_size; 70 val = (((size & 0x1fff) << 17) | 71 ((start & 0xfff) << 5) | 72 (i & 0x1f)); 73 nlm_write_nae_reg(nae_base, NAE_PARSER_SEQ_FIFO_CFG, val); 74 start += size; 75 } 76} 77 78void 79nlm_setup_rx_cal_cfg(uint64_t nae_base, int total_num_ports, 80 struct nae_port_config *cfg) 81{ 82 int rx_slots = 0, port; 83 int cal_len, cal = 0, last_free = 0; 84 uint32_t val; 85 86 for (port = 0; port < total_num_ports; port++) { 87 if (cfg[port].rx_slots_reqd) 88 rx_slots += cfg[port].rx_slots_reqd; 89 if (rx_slots > MAX_CAL_SLOTS) { 90 rx_slots = MAX_CAL_SLOTS; 91 break; 92 } 93 } 94 95 cal_len = rx_slots - 1; 96 97 do { 98 if (cal >= MAX_CAL_SLOTS) 99 break; 100 last_free = cal; 101 for (port = 0; port < total_num_ports; port++) { 102 if (cfg[port].rx_slots_reqd > 0) { 103 val = (cal_len << 16) | (port << 8) | cal; 104 nlm_write_nae_reg(nae_base, 105 NAE_RX_IF_SLOT_CAL, 
val); 106 cal++; 107 cfg[port].rx_slots_reqd--; 108 } 109 } 110 if (last_free == cal) 111 break; 112 } while (1); 113} 114 115void 116nlm_setup_tx_cal_cfg(uint64_t nae_base, int total_num_ports, 117 struct nae_port_config *cfg) 118{ 119 int tx_slots = 0, port; 120 int cal = 0, last_free = 0; 121 uint32_t val; 122 123 for (port = 0; port < total_num_ports; port++) { 124 if (cfg[port].tx_slots_reqd) 125 tx_slots += cfg[port].tx_slots_reqd; 126 if (tx_slots > MAX_CAL_SLOTS) { 127 tx_slots = MAX_CAL_SLOTS; 128 break; 129 } 130 } 131 132 nlm_write_nae_reg(nae_base, NAE_EGR_NIOR_CAL_LEN_REG, tx_slots - 1); 133 do { 134 if (cal >= MAX_CAL_SLOTS) 135 break; 136 last_free = cal; 137 for (port = 0; port < total_num_ports; port++) { 138 if (cfg[port].tx_slots_reqd > 0) { 139 val = (port << 7) | (cal << 1) | 1; 140 nlm_write_nae_reg(nae_base, 141 NAE_EGR_NIOR_CRDT_CAL_PROG, val); 142 cal++; 143 cfg[port].tx_slots_reqd--; 144 } 145 } 146 if (last_free == cal) 147 break; 148 } while (1); 149} 150 151void 152nlm_deflate_frin_fifo_carving(uint64_t nae_base, int total_num_ports) 153{ 154 const int minimum_size = 8; 155 uint32_t value; 156 int intf, start; 157 158 for (intf = 0; intf < total_num_ports; intf++) { 159 start = minimum_size * intf; 160 value = (minimum_size << 20) | (start << 8) | (intf); 161 nlm_write_nae_reg(nae_base, NAE_FREE_IN_FIFO_CFG, value); 162 } 163} 164 165void 166nlm_reset_nae(int node) 167{ 168 uint64_t sysbase; 169 uint64_t nae_base; 170 uint64_t nae_pcibase; 171 uint32_t rx_config; 172 uint32_t bar0; 173 int reset_bit; 174 175 sysbase = nlm_get_sys_regbase(node); 176 nae_base = nlm_get_nae_regbase(node); 177 nae_pcibase = nlm_get_nae_pcibase(node); 178 179 bar0 = nlm_read_pci_reg(nae_pcibase, XLP_PCI_CFGREG4); 180 181#if BYTE_ORDER == LITTLE_ENDIAN 182 if (nlm_is_xlp8xx_ax()) { 183 uint8_t val; 184 /* membar fixup */ 185 val = (bar0 >> 24) & 0xff; 186 bar0 = (val << 24) | (val << 16) | (val << 8) | val; 187 } 188#endif 189 190 if (nlm_is_xlp3xx()) 191 
reset_bit = 6; 192 else 193 reset_bit = 9; 194 195 /* Reset NAE */ 196 nlm_write_sys_reg(sysbase, SYS_RESET, (1 << reset_bit)); 197 198 /* XXXJC - 1s delay here may be too high */ 199 DELAY(1000000); 200 nlm_write_sys_reg(sysbase, SYS_RESET, (0 << reset_bit)); 201 DELAY(1000000); 202 203 rx_config = nlm_read_nae_reg(nae_base, NAE_RX_CONFIG); 204 nlm_write_pci_reg(nae_pcibase, XLP_PCI_CFGREG4, bar0); 205} 206 207void 208nlm_setup_poe_class_config(uint64_t nae_base, int max_poe_classes, 209 int num_contexts, int *poe_cl_tbl) 210{ 211 uint32_t val; 212 int i, max_poe_class_ctxt_tbl_sz; 213 214 max_poe_class_ctxt_tbl_sz = num_contexts/max_poe_classes; 215 for (i = 0; i < max_poe_class_ctxt_tbl_sz; i++) { 216 val = (poe_cl_tbl[(i/max_poe_classes) & 0x7] << 8) | i; 217 nlm_write_nae_reg(nae_base, NAE_POE_CLASS_SETUP_CFG, val); 218 } 219} 220 221void 222nlm_setup_vfbid_mapping(uint64_t nae_base) 223{ 224 uint32_t val; 225 int dest_vc, vfbid; 226 227 /* 127 is max vfbid */ 228 for (vfbid = 127; vfbid >= 0; vfbid--) { 229 dest_vc = nlm_get_vfbid_mapping(vfbid); 230 if (dest_vc < 0) 231 continue; 232 val = (dest_vc << 16) | (vfbid << 4) | 1; 233 nlm_write_nae_reg(nae_base, NAE_VFBID_DESTMAP_CMD, val); 234 } 235} 236 237void 238nlm_setup_flow_crc_poly(uint64_t nae_base, uint32_t poly) 239{ 240 nlm_write_nae_reg(nae_base, NAE_FLOW_CRC16_POLY_CFG, poly); 241} 242 243void 244nlm_setup_iface_fifo_cfg(uint64_t nae_base, int maxports, 245 struct nae_port_config *cfg) 246{ 247 uint32_t reg; 248 int fifo_xoff_thresh = 12; 249 int i, size; 250 int cur_iface_start = 0; 251 252 for (i = 0; i < maxports; i++) { 253 size = cfg[i].iface_fifo_size; 254 reg = ((fifo_xoff_thresh << 25) | 255 ((size & 0x1ff) << 16) | 256 ((cur_iface_start & 0xff) << 8) | 257 (i & 0x1f)); 258 nlm_write_nae_reg(nae_base, NAE_IFACE_FIFO_CFG, reg); 259 cur_iface_start += size; 260 } 261} 262 263void 264nlm_setup_rx_base_config(uint64_t nae_base, int maxports, 265 struct nae_port_config *cfg) 266{ 267 int base = 0; 
268 uint32_t val; 269 int i; 270 int id; 271 272 for (i = 0; i < (maxports/2); i++) { 273 id = 0x12 + i; /* RX_IF_BASE_CONFIG0 */ 274 275 val = (base & 0x3ff); 276 base += cfg[(i * 2)].num_channels; 277 278 val |= ((base & 0x3ff) << 16); 279 base += cfg[(i * 2) + 1].num_channels; 280 281 nlm_write_nae_reg(nae_base, NAE_REG(7, 0, id), val); 282 } 283} 284 285void 286nlm_setup_rx_buf_config(uint64_t nae_base, int maxports, 287 struct nae_port_config *cfg) 288{ 289 uint32_t val; 290 int i, sz, k; 291 int context = 0; 292 int base = 0; 293 294 for (i = 0; i < maxports; i++) { 295 if (cfg[i].type == UNKNOWN) 296 continue; 297 for (k = 0; k < cfg[i].num_channels; k++) { 298 /* write index (context num) */ 299 nlm_write_nae_reg(nae_base, NAE_RXBUF_BASE_DPTH_ADDR, 300 (context+k)); 301 302 /* write value (rx buf sizes) */ 303 sz = cfg[i].rxbuf_size; 304 val = 0x80000000 | ((base << 2) & 0x3fff); /* base */ 305 val |= (((sz << 2) & 0x3fff) << 16); /* size */ 306 307 nlm_write_nae_reg(nae_base, NAE_RXBUF_BASE_DPTH, val); 308 nlm_write_nae_reg(nae_base, NAE_RXBUF_BASE_DPTH, 309 (0x7fffffff & val)); 310 base += sz; 311 } 312 context += cfg[i].num_channels; 313 } 314} 315 316void 317nlm_setup_freein_fifo_cfg(uint64_t nae_base, struct nae_port_config *cfg) 318{ 319 int size, i; 320 uint32_t reg; 321 int start = 0, maxbufpool; 322 323 if (nlm_is_xlp8xx()) 324 maxbufpool = MAX_FREE_FIFO_POOL_8XX; 325 else 326 maxbufpool = MAX_FREE_FIFO_POOL_3XX; 327 for (i = 0; i < maxbufpool; i++) { 328 /* Each entry represents 2 descs; hence division by 2 */ 329 size = (cfg[i].num_free_descs / 2); 330 if (size == 0) 331 size = 8; 332 reg = ((size & 0x3ff ) << 20) | /* fcSize */ 333 ((start & 0x1ff) << 8) | /* fcStart */ 334 (i & 0x1f); 335 336 nlm_write_nae_reg(nae_base, NAE_FREE_IN_FIFO_CFG, reg); 337 start += size; 338 } 339} 340 341/* XXX function name */ 342int 343nlm_get_flow_mask(int num_ports) 344{ 345 const int max_bits = 5; /* upto 32 ports */ 346 int i; 347 348 /* Compute the number of 
bits to needed to 349 * represent all the ports */ 350 for (i = 0; i < max_bits; i++) { 351 if (num_ports <= (2 << i)) 352 return (i + 1); 353 } 354 return (max_bits); 355} 356 357void 358nlm_program_flow_cfg(uint64_t nae_base, int port, 359 uint32_t cur_flow_base, uint32_t flow_mask) 360{ 361 uint32_t val; 362 363 val = (cur_flow_base << 16) | port; 364 val |= ((flow_mask & 0x1f) << 8); 365 nlm_write_nae_reg(nae_base, NAE_FLOW_BASEMASK_CFG, val); 366} 367 368void 369xlp_ax_nae_lane_reset_txpll(uint64_t nae_base, int block, int lane_ctrl, 370 int mode) 371{ 372 uint32_t val = 0, saved_data; 373 int rext_sel = 0; 374 375 val = PHY_LANE_CTRL_RST | 376 PHY_LANE_CTRL_PWRDOWN | 377 (mode << PHY_LANE_CTRL_PHYMODE_POS); 378 379 /* set comma bypass for XAUI */ 380 if (mode != PHYMODE_SGMII) 381 val |= PHY_LANE_CTRL_BPC_XAUI; 382 383 nlm_write_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl), val); 384 385 if (lane_ctrl != 4) { 386 rext_sel = (1 << 23); 387 if (mode != PHYMODE_SGMII) 388 rext_sel |= PHY_LANE_CTRL_BPC_XAUI; 389 390 val = nlm_read_nae_reg(nae_base, 391 NAE_REG(block, PHY, lane_ctrl)); 392 val &= ~PHY_LANE_CTRL_RST; 393 val |= rext_sel; 394 395 /* Resetting PMA for non-zero lanes */ 396 nlm_write_nae_reg(nae_base, 397 NAE_REG(block, PHY, lane_ctrl), val); 398 399 DELAY(20000); /* 20 ms delay, XXXJC: needed? 
*/ 400 401 val |= PHY_LANE_CTRL_RST; 402 nlm_write_nae_reg(nae_base, 403 NAE_REG(block, PHY, lane_ctrl), val); 404 405 val = 0; 406 } 407 408 /* Come out of reset for TXPLL */ 409 saved_data = nlm_read_nae_reg(nae_base, 410 NAE_REG(block, PHY, lane_ctrl)) & 0xFFC00000; 411 412 nlm_write_nae_reg(nae_base, 413 NAE_REG(block, PHY, lane_ctrl), 414 (0x66 << PHY_LANE_CTRL_ADDR_POS) 415 | PHY_LANE_CTRL_CMD_READ 416 | PHY_LANE_CTRL_CMD_START 417 | PHY_LANE_CTRL_RST 418 | rext_sel 419 | val ); 420 421 while (((val = nlm_read_nae_reg(nae_base, 422 NAE_REG(block, PHY, lane_ctrl))) & 423 PHY_LANE_CTRL_CMD_PENDING)); 424 425 val &= 0xFF; 426 /* set bit[4] to 0 */ 427 val &= ~(1 << 4); 428 nlm_write_nae_reg(nae_base, 429 NAE_REG(block, PHY, lane_ctrl), 430 (0x66 << PHY_LANE_CTRL_ADDR_POS) 431 | PHY_LANE_CTRL_CMD_WRITE 432 | PHY_LANE_CTRL_CMD_START 433 | (0x0 << 19) /* (0x4 << 19) */ 434 | rext_sel 435 | saved_data 436 | val ); 437 438 /* re-do */ 439 nlm_write_nae_reg(nae_base, 440 NAE_REG(block, PHY, lane_ctrl), 441 (0x66 << PHY_LANE_CTRL_ADDR_POS) 442 | PHY_LANE_CTRL_CMD_WRITE 443 | PHY_LANE_CTRL_CMD_START 444 | (0x0 << 19) /* (0x4 << 19) */ 445 | rext_sel 446 | saved_data 447 | val ); 448 449 while (!((val = nlm_read_nae_reg(nae_base, 450 NAE_REG(block, PHY, (lane_ctrl - PHY_LANE_0_CTRL)))) & 451 PHY_LANE_STAT_PCR)); 452 453 /* Clear the Power Down bit */ 454 val = nlm_read_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl)); 455 val &= ~((1 << 29) | (0x7ffff)); 456 nlm_write_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl), 457 (rext_sel | val)); 458} 459 460void 461xlp_nae_lane_reset_txpll(uint64_t nae_base, int block, int lane_ctrl, 462 int mode) 463{ 464 uint32_t val = 0; 465 int rext_sel = 0; 466 467 if (lane_ctrl != 4) 468 rext_sel = (1 << 23); 469 470 val = nlm_read_nae_reg(nae_base, 471 NAE_REG(block, PHY, lane_ctrl)); 472 473 /* set comma bypass for XAUI */ 474 if (mode != PHYMODE_SGMII) 475 val |= PHY_LANE_CTRL_BPC_XAUI; 476 val |= 0x100000; 477 val |= (mode << 
PHY_LANE_CTRL_PHYMODE_POS); 478 val &= ~(0x20000); 479 nlm_write_nae_reg(nae_base, 480 NAE_REG(block, PHY, lane_ctrl), val); 481 482 val = nlm_read_nae_reg(nae_base, 483 NAE_REG(block, PHY, lane_ctrl)); 484 val |= 0x40000000; 485 nlm_write_nae_reg(nae_base, 486 NAE_REG(block, PHY, lane_ctrl), val); 487 488 /* clear the power down bit */ 489 val = nlm_read_nae_reg(nae_base, 490 NAE_REG(block, PHY, lane_ctrl)); 491 val &= ~( (1 << 29) | (0x7ffff)); 492 nlm_write_nae_reg(nae_base, 493 NAE_REG(block, PHY, lane_ctrl), rext_sel | val); 494} 495 496void 497xlp_nae_config_lane_gmac(uint64_t nae_base, int cplx_mask) 498{ 499 int block, lane_ctrl; 500 int cplx_lane_enable; 501 int lane_enable = 0; 502 503 cplx_lane_enable = LM_SGMII | 504 (LM_SGMII << 4) | 505 (LM_SGMII << 8) | 506 (LM_SGMII << 12); 507 508 /* Lane mode progamming */ 509 block = 7; 510 511 /* Complexes 0, 1 */ 512 if (cplx_mask & 0x1) 513 lane_enable |= cplx_lane_enable; 514 515 if (cplx_mask & 0x2) 516 lane_enable |= (cplx_lane_enable << 16); 517 518 if (lane_enable) { 519 nlm_write_nae_reg(nae_base, 520 NAE_REG(block, LANE_CFG, LANE_CFG_CPLX_0_1), 521 lane_enable); 522 lane_enable = 0; 523 } 524 /* Complexes 2 3 */ 525 if (cplx_mask & 0x4) 526 lane_enable |= cplx_lane_enable; 527 528 if (cplx_mask & 0x8) 529 lane_enable |= (cplx_lane_enable << 16); 530 531 nlm_write_nae_reg(nae_base, 532 NAE_REG(block, LANE_CFG, LANE_CFG_CPLX_2_3), 533 lane_enable); 534 535 /* complex 4 */ 536 /* XXXJC : fix duplicate code */ 537 if (cplx_mask & 0x10) { 538 nlm_write_nae_reg(nae_base, 539 NAE_REG(block, LANE_CFG, LANE_CFG_CPLX_4), 540 ((LM_SGMII << 4) | LM_SGMII)); 541 for (lane_ctrl = PHY_LANE_0_CTRL; 542 lane_ctrl <= PHY_LANE_1_CTRL; lane_ctrl++) { 543 if (!nlm_is_xlp8xx_ax()) 544 xlp_nae_lane_reset_txpll(nae_base, 545 4, lane_ctrl, PHYMODE_SGMII); 546 else 547 xlp_ax_nae_lane_reset_txpll(nae_base, 4, 548 lane_ctrl, PHYMODE_SGMII); 549 } 550 } 551 552 for (block = 0; block < 4; block++) { 553 if ((cplx_mask & (1 << 
block)) == 0) 554 continue; 555 556 for (lane_ctrl = PHY_LANE_0_CTRL; 557 lane_ctrl <= PHY_LANE_3_CTRL; lane_ctrl++) { 558 if (!nlm_is_xlp8xx_ax()) 559 xlp_nae_lane_reset_txpll(nae_base, 560 block, lane_ctrl, PHYMODE_SGMII); 561 else 562 xlp_ax_nae_lane_reset_txpll(nae_base, block, 563 lane_ctrl, PHYMODE_SGMII); 564 } 565 } 566} 567 568void 569config_egress_fifo_carvings(uint64_t nae_base, int hwport, int start_ctxt, 570 int num_ctxts, int max_ctxts, struct nae_port_config *cfg) 571{ 572 static uint32_t cur_start[6] = {0, 0, 0, 0, 0, 0}; 573 uint32_t data = 0; 574 uint32_t start = 0, size, offset; 575 int i, limit; 576 577 limit = start_ctxt + num_ctxts; 578 /* Stage 2 FIFO */ 579 start = cur_start[0]; 580 for (i = start_ctxt; i < limit; i++) { 581 size = cfg[hwport].stg2_fifo_size / max_ctxts; 582 if (size) 583 offset = size - 1; 584 else 585 offset = size; 586 if (offset > cfg[hwport].max_stg2_offset) 587 offset = cfg[hwport].max_stg2_offset; 588 data = offset << 23 | 589 start << 11 | 590 i << 1 | 591 1; 592 nlm_write_nae_reg(nae_base, NAE_STG2_PMEM_PROG, data); 593 start += size; 594 } 595 cur_start[0] = start; 596 597 /* EH FIFO */ 598 start = cur_start[1]; 599 for (i = start_ctxt; i < limit; i++) { 600 size = cfg[hwport].eh_fifo_size / max_ctxts; 601 if (size) 602 offset = size - 1; 603 else 604 offset = size ; 605 if (offset > cfg[hwport].max_eh_offset) 606 offset = cfg[hwport].max_eh_offset; 607 data = offset << 23 | 608 start << 11 | 609 i << 1 | 610 1; 611 nlm_write_nae_reg(nae_base, NAE_EH_PMEM_PROG, data); 612 start += size; 613 } 614 cur_start[1] = start; 615 616 /* FROUT FIFO */ 617 start = cur_start[2]; 618 for (i = start_ctxt; i < limit; i++) { 619 size = cfg[hwport].frout_fifo_size / max_ctxts; 620 if (size) 621 offset = size - 1; 622 else 623 offset = size ; 624 if (offset > cfg[hwport].max_frout_offset) 625 offset = cfg[hwport].max_frout_offset; 626 data = offset << 23 | 627 start << 11 | 628 i << 1 | 629 1; 630 nlm_write_nae_reg(nae_base, 
NAE_FREE_PMEM_PROG, data); 631 start += size; 632 } 633 cur_start[2] = start; 634 635 /* MS FIFO */ 636 start = cur_start[3]; 637 for (i = start_ctxt; i < limit; i++) { 638 size = cfg[hwport].ms_fifo_size / max_ctxts; 639 if (size) 640 offset = size - 1; 641 else 642 offset = size ; 643 if (offset > cfg[hwport].max_ms_offset) 644 offset = cfg[hwport].max_ms_offset; 645 data = offset << 22 | /* FIXME in PRM */ 646 start << 11 | 647 i << 1 | 648 1; 649 nlm_write_nae_reg(nae_base, NAE_STR_PMEM_CMD, data); 650 start += size; 651 } 652 cur_start[3] = start; 653 654 /* PKT FIFO */ 655 start = cur_start[4]; 656 for (i = start_ctxt; i < limit; i++) { 657 size = cfg[hwport].pkt_fifo_size / max_ctxts; 658 if (size) 659 offset = size - 1; 660 else 661 offset = size ; 662 if (offset > cfg[hwport].max_pmem_offset) 663 offset = cfg[hwport].max_pmem_offset; 664 nlm_write_nae_reg(nae_base, NAE_TX_PKT_PMEM_CMD1, offset); 665 666 data = start << 11 | 667 i << 1 | 668 1; 669 nlm_write_nae_reg(nae_base, NAE_TX_PKT_PMEM_CMD0, data); 670 start += size; 671 } 672 cur_start[4] = start; 673 674 /* PKT LEN FIFO */ 675 start = cur_start[5]; 676 for (i = start_ctxt; i < limit; i++) { 677 size = cfg[hwport].pktlen_fifo_size / max_ctxts; 678 if (size) 679 offset = size - 1; 680 else 681 offset = size ; 682 data = offset << 22 | 683 start << 11 | 684 i << 1 | 685 1; 686 nlm_write_nae_reg(nae_base, NAE_TX_PKTLEN_PMEM_CMD, data); 687 start += size; 688 } 689 cur_start[5] = start; 690} 691 692void 693config_egress_fifo_credits(uint64_t nae_base, int hwport, int start_ctxt, 694 int num_ctxts, int max_ctxts, struct nae_port_config *cfg) 695{ 696 uint32_t data, credit, max_credit; 697 int i, limit; 698 699 limit = start_ctxt + num_ctxts; 700 /* Stage1 -> Stage2 */ 701 max_credit = cfg[hwport].max_stg2_offset + 1; 702 for (i = start_ctxt; i < limit; i++) { 703 credit = cfg[hwport].stg1_2_credit / max_ctxts; 704 if (credit > max_credit) 705 credit = max_credit; 706 data = credit << 16 | 707 i << 4 | 708 
1; 709 nlm_write_nae_reg(nae_base, NAE_STG1_STG2CRDT_CMD, data); 710 } 711 712 /* Stage2 -> EH */ 713 max_credit = cfg[hwport].max_eh_offset + 1; 714 for (i = start_ctxt; i < limit; i++) { 715 credit = cfg[hwport].stg2_eh_credit / max_ctxts; 716 if (credit > max_credit) 717 credit = max_credit; 718 data = credit << 16 | 719 i << 4 | 720 1; 721 nlm_write_nae_reg(nae_base, NAE_STG2_EHCRDT_CMD, data); 722 } 723 724 /* Stage2 -> Frout */ 725 max_credit = cfg[hwport].max_frout_offset + 1; 726 for (i = start_ctxt; i < limit; i++) { 727 credit = cfg[hwport].stg2_frout_credit / max_ctxts; 728 if (credit > max_credit) 729 credit = max_credit; 730 data = credit << 16 | 731 i << 4 | 732 1; 733 nlm_write_nae_reg(nae_base, NAE_EH_FREECRDT_CMD, data); 734 } 735 736 /* Stage2 -> MS */ 737 max_credit = cfg[hwport].max_ms_offset + 1; 738 for (i = start_ctxt; i < limit; i++) { 739 credit = cfg[hwport].stg2_ms_credit / max_ctxts; 740 if (credit > max_credit) 741 credit = max_credit; 742 data = credit << 16 | 743 i << 4 | 744 1; 745 nlm_write_nae_reg(nae_base, NAE_STG2_STRCRDT_CMD, data); 746 } 747} 748 749void 750nlm_config_freein_fifo_uniq_cfg(uint64_t nae_base, int port, 751 int nblock_free_desc) 752{ 753 uint32_t val; 754 int size_in_clines; 755 756 size_in_clines = (nblock_free_desc / NAE_CACHELINE_SIZE); 757 val = (size_in_clines << 8) | (port & 0x1f); 758 nlm_write_nae_reg(nae_base, NAE_FREEIN_FIFO_UNIQ_SZ_CFG, val); 759} 760 761/* XXXJC: redundant, see ucore_spray_config() */ 762void 763nlm_config_ucore_iface_mask_cfg(uint64_t nae_base, int port, 764 int nblock_ucore_mask) 765{ 766 uint32_t val; 767 768 val = ( 0x1U << 31) | ((nblock_ucore_mask & 0xffff) << 8) | 769 (port & 0x1f); 770 nlm_write_nae_reg(nae_base, NAE_UCORE_IFACEMASK_CFG, val); 771} 772 773int 774nlm_nae_init_netior(uint64_t nae_base, int nblocks) 775{ 776 uint32_t ctrl1, ctrl2, ctrl3; 777 778 if (nblocks == 5) 779 ctrl3 = 0x07 << 18; 780 else 781 ctrl3 = 0; 782 783 switch (nblocks) { 784 case 2: 785 ctrl1 = 
0xff; 786 ctrl2 = 0x0707; 787 break; 788 case 4: 789 case 5: 790 ctrl1 = 0xfffff; 791 ctrl2 = 0x07070707; 792 break; 793 default: 794 printf("WARNING: unsupported blocks %d\n", nblocks); 795 return (-1); 796 } 797 798 nlm_write_nae_reg(nae_base, NAE_LANE_CFG_SOFTRESET, 0); 799 nlm_write_nae_reg(nae_base, NAE_NETIOR_MISC_CTRL3, ctrl3); 800 nlm_write_nae_reg(nae_base, NAE_NETIOR_MISC_CTRL2, ctrl2); 801 nlm_write_nae_reg(nae_base, NAE_NETIOR_MISC_CTRL1, ctrl1); 802 nlm_write_nae_reg(nae_base, NAE_NETIOR_MISC_CTRL1, 0x0); 803 return (0); 804} 805 806void 807nlm_nae_init_ingress(uint64_t nae_base, uint32_t desc_size) 808{ 809 uint32_t rx_cfg; 810 uint32_t parser_threshold = 384; 811 812 rx_cfg = nlm_read_nae_reg(nae_base, NAE_RX_CONFIG); 813 rx_cfg &= ~(0x3 << 1); /* reset max message size */ 814 rx_cfg &= ~(0xff << 4); /* clear freein desc cluster size */ 815 rx_cfg &= ~(0x3f << 24); /* reset rx status mask */ /*XXX: why not 7f */ 816 817 rx_cfg |= 1; /* rx enable */ 818 rx_cfg |= (0x0 << 1); /* max message size */ 819 rx_cfg |= (0x43 & 0x7f) << 24; /* rx status mask */ 820 rx_cfg |= ((desc_size / 64) & 0xff) << 4; /* freein desc cluster size */ 821 nlm_write_nae_reg(nae_base, NAE_RX_CONFIG, rx_cfg); 822 nlm_write_nae_reg(nae_base, NAE_PARSER_CONFIG, 823 (parser_threshold & 0x3ff) | 824 (((parser_threshold / desc_size) + 1) & 0xff) << 12 | 825 (((parser_threshold / 64) % desc_size) & 0xff) << 20); 826 827 /*nlm_write_nae_reg(nae_base, NAE_RX_FREE_FIFO_THRESH, 33);*/ 828} 829 830void 831nlm_nae_init_egress(uint64_t nae_base) 832{ 833 uint32_t tx_cfg; 834 835 tx_cfg = nlm_read_nae_reg(nae_base, NAE_TX_CONFIG); 836 if (!nlm_is_xlp8xx_ax()) { 837 nlm_write_nae_reg(nae_base, NAE_TX_CONFIG, 838 tx_cfg | 839 0x1 | /* tx enable */ 840 0x2 | /* tx ace */ 841 0x4 | /* tx compatible */ 842 (1 << 3)); 843 } else { 844 nlm_write_nae_reg(nae_base, NAE_TX_CONFIG, 845 tx_cfg | 846 0x1 | /* tx enable */ 847 0x2); /* tx ace */ 848 } 849} 850 851uint32_t 852ucore_spray_config(uint32_t 
interface, uint32_t ucore_mask, int cmd) 853{ 854 return ((cmd & 0x1) << 31) | ((ucore_mask & 0xffff) << 8) | 855 (interface & 0x1f); 856} 857 858void 859nlm_nae_init_ucore(uint64_t nae_base, int if_num, u_int ucore_mask) 860{ 861 uint32_t ucfg; 862 863 ucfg = ucore_spray_config(if_num, ucore_mask, 1); /* 1 : write */ 864 nlm_write_nae_reg(nae_base, NAE_UCORE_IFACEMASK_CFG, ucfg); 865} 866 867uint64_t 868nae_tx_desc(u_int type, u_int rdex, u_int fbid, u_int len, uint64_t addr) 869{ 870 return ((uint64_t)type << 62) | 871 ((uint64_t)rdex << 61) | 872 ((uint64_t)fbid << 54) | 873 ((uint64_t)len << 40) | addr; 874} 875 876void 877nlm_setup_l2type(uint64_t nae_base, int hwport, uint32_t l2extlen, 878 uint32_t l2extoff, uint32_t extra_hdrsize, uint32_t proto_offset, 879 uint32_t fixed_hdroff, uint32_t l2proto) 880{ 881 uint32_t val; 882 883 val = ((l2extlen & 0x3f) << 26) | 884 ((l2extoff & 0x3f) << 20) | 885 ((extra_hdrsize & 0x3f) << 14) | 886 ((proto_offset & 0x3f) << 8) | 887 ((fixed_hdroff & 0x3f) << 2) | 888 (l2proto & 0x3); 889 nlm_write_nae_reg(nae_base, (NAE_L2_TYPE_PORT0 + hwport), val); 890} 891 892void 893nlm_setup_l3ctable_mask(uint64_t nae_base, int hwport, uint32_t ptmask, 894 uint32_t l3portmask) 895{ 896 uint32_t val; 897 898 val = ((ptmask & 0x1) << 6) | 899 ((l3portmask & 0x1) << 5) | 900 (hwport & 0x1f); 901 nlm_write_nae_reg(nae_base, NAE_L3_CTABLE_MASK0, val); 902} 903 904void 905nlm_setup_l3ctable_even(uint64_t nae_base, int entry, uint32_t l3hdroff, 906 uint32_t ipcsum_en, uint32_t l4protooff, 907 uint32_t l2proto, uint32_t eth_type) 908{ 909 uint32_t val; 910 911 val = ((l3hdroff & 0x3f) << 26) | 912 ((l4protooff & 0x3f) << 20) | 913 ((ipcsum_en & 0x1) << 18) | 914 ((l2proto & 0x3) << 16) | 915 (eth_type & 0xffff); 916 nlm_write_nae_reg(nae_base, (NAE_L3CTABLE0 + (entry * 2)), val); 917} 918 919void 920nlm_setup_l3ctable_odd(uint64_t nae_base, int entry, uint32_t l3off0, 921 uint32_t l3len0, uint32_t l3off1, uint32_t l3len1, 922 uint32_t l3off2, 
uint32_t l3len2) 923{ 924 uint32_t val; 925 926 val = ((l3off0 & 0x3f) << 26) | 927 ((l3len0 & 0x1f) << 21) | 928 ((l3off1 & 0x3f) << 15) | 929 ((l3len1 & 0x1f) << 10) | 930 ((l3off2 & 0x3f) << 4) | 931 (l3len2 & 0xf); 932 nlm_write_nae_reg(nae_base, (NAE_L3CTABLE0 + ((entry * 2) + 1)), val); 933} 934 935void 936nlm_setup_l4ctable_even(uint64_t nae_base, int entry, uint32_t im, 937 uint32_t l3cm, uint32_t l4pm, uint32_t port, 938 uint32_t l3camaddr, uint32_t l4proto) 939{ 940 uint32_t val; 941 942 val = ((im & 0x1) << 19) | 943 ((l3cm & 0x1) << 18) | 944 ((l4pm & 0x1) << 17) | 945 ((port & 0x1f) << 12) | 946 ((l3camaddr & 0xf) << 8) | 947 (l4proto & 0xff); 948 nlm_write_nae_reg(nae_base, (NAE_L4CTABLE0 + (entry * 2)), val); 949} 950 951void 952nlm_setup_l4ctable_odd(uint64_t nae_base, int entry, uint32_t l4off0, 953 uint32_t l4len0, uint32_t l4off1, uint32_t l4len1) 954{ 955 uint32_t val; 956 957 val = ((l4off0 & 0x3f) << 21) | 958 ((l4len0 & 0xf) << 17) | 959 ((l4off1 & 0x3f) << 11) | 960 (l4len1 & 0xf); 961 nlm_write_nae_reg(nae_base, (NAE_L4CTABLE0 + ((entry * 2) + 1)), val); 962} 963 964void 965nlm_enable_hardware_parser(uint64_t nae_base) 966{ 967 uint32_t val; 968 969 val = nlm_read_nae_reg(nae_base, NAE_RX_CONFIG); 970 val |= (1 << 12); /* hardware parser enable */ 971 nlm_write_nae_reg(nae_base, NAE_RX_CONFIG, val); 972 973 /*********************************************** 974 * program L3 CAM table 975 ***********************************************/ 976 977 /* 978 * entry-0 is ipv4 MPLS type 1 label 979 */ 980 /* l3hdroff = 4 bytes, ether_type = 0x8847 for MPLS_type1 */ 981 nlm_setup_l3ctable_even(nae_base, 0, 4, 1, 9, 1, 0x8847); 982 /* l3off0 (8 bytes) -> l3len0 (1 byte) := ip proto 983 * l3off1 (12 bytes) -> l3len1 (4 bytes) := src ip 984 * l3off2 (16 bytes) -> l3len2 (4 bytes) := dst ip 985 */ 986 nlm_setup_l3ctable_odd(nae_base, 0, 9, 1, 12, 4, 16, 4); 987 988 /* 989 * entry-1 is for ethernet IPv4 packets 990 */ 991 nlm_setup_l3ctable_even(nae_base, 
1, 0, 1, 9, 1, 0x0800); 992 /* l3off0 (8 bytes) -> l3len0 (1 byte) := ip proto 993 * l3off1 (12 bytes) -> l3len1 (4 bytes) := src ip 994 * l3off2 (16 bytes) -> l3len2 (4 bytes) := dst ip 995 */ 996 nlm_setup_l3ctable_odd(nae_base, 1, 9, 1, 12, 4, 16, 4); 997 998 /* 999 * entry-2 is for ethernet IPv6 packets 1000 */ 1001 nlm_setup_l3ctable_even(nae_base, 2, 0, 1, 6, 1, 0x86dd); 1002 /* l3off0 (6 bytes) -> l3len0 (1 byte) := next header (ip proto) 1003 * l3off1 (8 bytes) -> l3len1 (16 bytes) := src ip 1004 * l3off2 (24 bytes) -> l3len2 (16 bytes) := dst ip 1005 */ 1006 nlm_setup_l3ctable_odd(nae_base, 2, 6, 1, 8, 16, 24, 16); 1007 1008 /* 1009 * entry-3 is for ethernet ARP packets 1010 */ 1011 nlm_setup_l3ctable_even(nae_base, 3, 0, 0, 9, 1, 0x0806); 1012 /* extract 30 bytes from packet start */ 1013 nlm_setup_l3ctable_odd(nae_base, 3, 0, 30, 0, 0, 0, 0); 1014 1015 /* 1016 * entry-4 is for ethernet FCoE packets 1017 */ 1018 nlm_setup_l3ctable_even(nae_base, 4, 0, 0, 9, 1, 0x8906); 1019 /* FCoE packet consists of 4 byte start-of-frame, 1020 * and 24 bytes of frame header, followed by 1021 * 64 bytes of optional-header (ESP, network..), 1022 * 2048 bytes of payload, 36 bytes of optional 1023 * "fill bytes" or ESP trailer, 4 bytes of CRC, 1024 * and 4 bytes of end-of-frame 1025 * We extract the first 4 + 24 = 28 bytes 1026 */ 1027 nlm_setup_l3ctable_odd(nae_base, 4, 0, 28, 0, 0, 0, 0); 1028 1029 /* 1030 * entry-5 is for vlan tagged frames (0x8100) 1031 */ 1032 nlm_setup_l3ctable_even(nae_base, 5, 0, 0, 9, 1, 0x8100); 1033 /* we extract 31 bytes from the payload */ 1034 nlm_setup_l3ctable_odd(nae_base, 5, 0, 31, 0, 0, 0, 0); 1035 1036 /* 1037 * entry-6 is for ieee 802.1ad provider bridging 1038 * tagged frames (0x88a8) 1039 */ 1040 nlm_setup_l3ctable_even(nae_base, 6, 0, 0, 9, 1, 0x88a8); 1041 /* we extract 31 bytes from the payload */ 1042 nlm_setup_l3ctable_odd(nae_base, 6, 0, 31, 0, 0, 0, 0); 1043 1044 /* 1045 * entry-7 is for Cisco's Q-in-Q tagged frames (0x9100) 
1046 */ 1047 nlm_setup_l3ctable_even(nae_base, 7, 0, 0, 9, 1, 0x9100); 1048 /* we extract 31 bytes from the payload */ 1049 nlm_setup_l3ctable_odd(nae_base, 7, 0, 31, 0, 0, 0, 0); 1050 1051 /* 1052 * entry-8 is for Ethernet Jumbo frames (0x8870) 1053 */ 1054 nlm_setup_l3ctable_even(nae_base, 8, 0, 0, 9, 1, 0x8870); 1055 /* we extract 31 bytes from the payload */ 1056 nlm_setup_l3ctable_odd(nae_base, 8, 0, 31, 0, 0, 0, 0); 1057 1058 /* 1059 * entry-9 is for MPLS Multicast frames (0x8848) 1060 */ 1061 nlm_setup_l3ctable_even(nae_base, 9, 0, 0, 9, 1, 0x8848); 1062 /* we extract 31 bytes from the payload */ 1063 nlm_setup_l3ctable_odd(nae_base, 9, 0, 31, 0, 0, 0, 0); 1064 1065 /* 1066 * entry-10 is for IEEE 802.1ae MAC Security frames (0x88e5) 1067 */ 1068 nlm_setup_l3ctable_even(nae_base, 10, 0, 0, 9, 1, 0x88e5); 1069 /* we extract 31 bytes from the payload */ 1070 nlm_setup_l3ctable_odd(nae_base, 10, 0, 31, 0, 0, 0, 0); 1071 1072 /* 1073 * entry-11 is for PTP frames (0x88f7) 1074 */ 1075 nlm_setup_l3ctable_even(nae_base, 11, 0, 0, 9, 1, 0x88f7); 1076 /* PTP messages can be sent as UDP messages over 1077 * IPv4 or IPv6; and as a raw ethernet message 1078 * with ethertype 0x88f7. The message contents 1079 * are the same for UDP or ethernet based encapsulations 1080 * The header is 34 bytes long, and we extract 1081 * it all out. 1082 */ 1083 nlm_setup_l3ctable_odd(nae_base, 11, 0, 31, 31, 2, 0, 0); 1084 1085 /* 1086 * entry-12 is for ethernet Link Control Protocol (LCP) 1087 * used with PPPoE 1088 */ 1089 nlm_setup_l3ctable_even(nae_base, 12, 0, 0, 9, 1, 0xc021); 1090 /* LCP packet consists of 1 byte of code, 1 byte of 1091 * identifier and two bytes of length followed by 1092 * data (upto length bytes). 
1093 * We extract 4 bytes from start of packet 1094 */ 1095 nlm_setup_l3ctable_odd(nae_base, 12, 0, 4, 0, 0, 0, 0); 1096 1097 /* 1098 * entry-13 is for ethernet Link Quality Report (0xc025) 1099 * used with PPPoE 1100 */ 1101 nlm_setup_l3ctable_even(nae_base, 13, 0, 0, 9, 1, 0xc025); 1102 /* We extract 31 bytes from packet start */ 1103 nlm_setup_l3ctable_odd(nae_base, 13, 0, 31, 0, 0, 0, 0); 1104 1105 /* 1106 * entry-14 is for PPPoE Session (0x8864) 1107 */ 1108 nlm_setup_l3ctable_even(nae_base, 14, 0, 0, 9, 1, 0x8864); 1109 /* We extract 31 bytes from packet start */ 1110 nlm_setup_l3ctable_odd(nae_base, 14, 0, 31, 0, 0, 0, 0); 1111 1112 /* 1113 * entry-15 - default entry 1114 */ 1115 nlm_setup_l3ctable_even(nae_base, 15, 0, 0, 0, 0, 0x0000); 1116 /* We extract 31 bytes from packet start */ 1117 nlm_setup_l3ctable_odd(nae_base, 15, 0, 31, 0, 0, 0, 0); 1118 1119 /*********************************************** 1120 * program L4 CAM table 1121 ***********************************************/ 1122 1123 /* 1124 * entry-0 - tcp packets (0x6) 1125 */ 1126 nlm_setup_l4ctable_even(nae_base, 0, 0, 0, 1, 0, 0, 0x6); 1127 /* tcp header is 20 bytes without tcp options 1128 * We extract 20 bytes from tcp start */ 1129 nlm_setup_l4ctable_odd(nae_base, 0, 0, 15, 15, 5); 1130 1131 /* 1132 * entry-1 - udp packets (0x11) 1133 */ 1134 nlm_setup_l4ctable_even(nae_base, 1, 0, 0, 1, 0, 0, 0x11); 1135 /* udp header is 8 bytes in size. 1136 * We extract 8 bytes from udp start */ 1137 nlm_setup_l4ctable_odd(nae_base, 1, 0, 8, 0, 0); 1138 1139 /* 1140 * entry-2 - sctp packets (0x84) 1141 */ 1142 nlm_setup_l4ctable_even(nae_base, 2, 0, 0, 1, 0, 0, 0x84); 1143 /* sctp packets have a 12 byte generic header 1144 * and various chunks. 
1145 * We extract 12 bytes from sctp start */ 1146 nlm_setup_l4ctable_odd(nae_base, 2, 0, 12, 0, 0); 1147 1148 /* 1149 * entry-3 - RDP packets (0x1b) 1150 */ 1151 nlm_setup_l4ctable_even(nae_base, 3, 0, 0, 1, 0, 0, 0x1b); 1152 /* RDP packets have 18 bytes of generic header 1153 * before variable header starts. 1154 * We extract 18 bytes from rdp start */ 1155 nlm_setup_l4ctable_odd(nae_base, 3, 0, 15, 15, 3); 1156 1157 /* 1158 * entry-4 - DCCP packets (0x21) 1159 */ 1160 nlm_setup_l4ctable_even(nae_base, 4, 0, 0, 1, 0, 0, 0x21); 1161 /* DCCP has two types of generic headers of 1162 * sizes 16 bytes and 12 bytes if X = 1. 1163 * We extract 16 bytes from dccp start */ 1164 nlm_setup_l4ctable_odd(nae_base, 4, 0, 15, 15, 1); 1165 1166 /* 1167 * entry-5 - ipv6 encapsulated in ipv4 packets (0x29) 1168 */ 1169 nlm_setup_l4ctable_even(nae_base, 5, 0, 0, 1, 0, 0, 0x29); 1170 /* ipv4 header is 20 bytes excluding IP options. 1171 * We extract 20 bytes from IPv4 start */ 1172 nlm_setup_l4ctable_odd(nae_base, 5, 0, 15, 15, 5); 1173 1174 /* 1175 * entry-6 - ip in ip encapsulation packets (0x04) 1176 */ 1177 nlm_setup_l4ctable_even(nae_base, 6, 0, 0, 1, 0, 0, 0x04); 1178 /* ipv4 header is 20 bytes excluding IP options. 
	 * We extract 20 bytes from ipv4 start */
	nlm_setup_l4ctable_odd(nae_base, 6, 0, 15, 15, 5);

	/*
	 * entry-7 - default entry (0x0): catch-all for protocols not
	 * matched above; extract as for a 20-byte header.
	 */
	nlm_setup_l4ctable_even(nae_base, 7, 0, 0, 1, 0, 0, 0x0);
	/* We extract 20 bytes from packet start */
	nlm_setup_l4ctable_odd(nae_base, 7, 0, 15, 15, 5);
}

/*
 * Enable the hardware parser for one network port.
 *
 * block/port: NAE interface block and the port (0-3) within it;
 * hwport is the flat port index used by the parser tables.
 */
void
nlm_enable_hardware_parser_per_port(uint64_t nae_base, int block, int port)
{
	int hwport = (block * 4) + (port & 0x3);

	/* program L2 and L3 header extraction for each port */
	/* enable ethernet L2 mode on port */
	nlm_setup_l2type(nae_base, hwport, 0, 0, 0, 0, 0, 1);

	/* l2proto and ethtype included in l3cam */
	nlm_setup_l3ctable_mask(nae_base, hwport, 1, 0);
}

/*
 * Enable RX pre-padding of received frames.
 *
 * size is an encoded 2-bit value written to NAE_RX_CONFIG bits 23:22
 * (the actual byte count per encoding is hardware-defined — see the
 * XLP NAE register documentation).
 */
void
nlm_prepad_enable(uint64_t nae_base, int size)
{
	uint32_t val;

	val = nlm_read_nae_reg(nae_base, NAE_RX_CONFIG);
	val |= (1 << 13);		/* prepad enable */
	val |= ((size & 0x3) << 22);	/* prepad size */
	nlm_write_nae_reg(nae_base, NAE_RX_CONFIG, val);
}

/*
 * Program the IEEE 1588 (PTP) timer block from the board configuration.
 *
 * Only cfg[0] is consulted: the 1588 timer parameters are global to the
 * NAE, not per-port.  Each 64-bit value is split into HI/LO 32-bit
 * register writes.  Finally the freq_mul and load_user_val control bits
 * are pulsed (set then cleared) to latch the programmed values.
 */
void
nlm_setup_1588_timer(uint64_t nae_base, struct nae_port_config *cfg)
{
	uint32_t hi, lo, val;

	hi = cfg[0].ieee1588_userval >> 32;
	lo = cfg[0].ieee1588_userval & 0xffffffff;
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_USER_VALUE_HI, hi);
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_USER_VALUE_LO, lo);

	hi = cfg[0].ieee1588_ptpoff >> 32;
	lo = cfg[0].ieee1588_ptpoff & 0xffffffff;
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_OFFSET_HI, hi);
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_OFFSET_LO, lo);

	hi = cfg[0].ieee1588_tmr1 >> 32;
	lo = cfg[0].ieee1588_tmr1 & 0xffffffff;
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_TMR1_HI, hi);
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_TMR1_LO, lo);

	hi = cfg[0].ieee1588_tmr2 >> 32;
	lo = cfg[0].ieee1588_tmr2 & 0xffffffff;
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_TMR2_HI, hi);
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_TMR2_LO, lo);

	hi = cfg[0].ieee1588_tmr3 >> 32;
	lo = cfg[0].ieee1588_tmr3 & 0xffffffff;
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_TMR3_HI, hi);
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_TMR3_LO, lo);

	/* clock increment: integer part plus num/den fractional part */
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_INC_INTG,
	    cfg[0].ieee1588_inc_intg);
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_INC_NUM,
	    cfg[0].ieee1588_inc_num);
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_INC_DEN,
	    cfg[0].ieee1588_inc_den);

	val = nlm_read_nae_reg(nae_base, NAE_1588_PTP_CONTROL);
	/* set and clear freq_mul = 1 (bit 1) to load the increment */
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_CONTROL, val | (0x1 << 1));
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_CONTROL, val);
	/* set and clear load_user_val = 1 (bit 6) to load the user value */
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_CONTROL, val | (0x1 << 6));
	nlm_write_nae_reg(nae_base, NAE_1588_PTP_CONTROL, val);
}

/*
 * Enable the MAC (RX and TX paths) for a port.
 *
 * nblock is the NAE interface block, port_type selects the interface
 * flavor (SGMIIC / XAUIC / ILC), and only the low two bits of port are
 * used (port within the block).  ILC ports need no MAC enable here.
 */
void
nlm_mac_enable(uint64_t nae_base, int nblock, int port_type, int port)
{
	uint32_t mac_cfg1, xaui_cfg;
	uint32_t netwk_inf;
	int iface = port & 0x3;

	switch(port_type) {
	case SGMIIC:
		netwk_inf = nlm_read_nae_reg(nae_base,
		    SGMII_NET_IFACE_CTRL(nblock, iface));
		nlm_write_nae_reg(nae_base,
		    SGMII_NET_IFACE_CTRL(nblock, iface),
		    netwk_inf |
		    (1 << 2));		/* enable tx */
		mac_cfg1 = nlm_read_nae_reg(nae_base,
		    SGMII_MAC_CONF1(nblock, iface));
		nlm_write_nae_reg(nae_base,
		    SGMII_MAC_CONF1(nblock, iface),
		    mac_cfg1 |
		    (1 << 2) |		/* rx enable */
		    1);			/* tx enable */
		break;
	case XAUIC:
		xaui_cfg = nlm_read_nae_reg(nae_base,
		    XAUI_CONFIG1(nblock));
		nlm_write_nae_reg(nae_base,
		    XAUI_CONFIG1(nblock),
		    xaui_cfg |
		    XAUI_CONFIG_TFEN |	/* transmit flow enable */
		    XAUI_CONFIG_RFEN);	/* receive flow enable */
		break;
	case ILC:
		/* Interlaken: nothing to do here */
		break;
	}
}

/*
 * Disable the MAC (RX and TX paths) for a port.
 *
 * Mirror image of nlm_mac_enable(): clears the same enable bits, in
 * the reverse order (MAC first, then the network-interface TX enable).
 */
void
nlm_mac_disable(uint64_t nae_base, int nblock, int port_type, int port)
{
	uint32_t mac_cfg1, xaui_cfg;
	uint32_t netwk_inf;
	int iface = port & 0x3;

	switch(port_type) {
	case SGMIIC:
		mac_cfg1 = nlm_read_nae_reg(nae_base,
		    SGMII_MAC_CONF1(nblock, iface));
		nlm_write_nae_reg(nae_base,
		    SGMII_MAC_CONF1(nblock, iface),
		    mac_cfg1 &
		    ~((1 << 2) |	/* rx enable */
		    1));		/* tx enable */
		netwk_inf = nlm_read_nae_reg(nae_base,
		    SGMII_NET_IFACE_CTRL(nblock, iface));
		nlm_write_nae_reg(nae_base,
		    SGMII_NET_IFACE_CTRL(nblock, iface),
		    netwk_inf &
		    ~(1 << 2));		/* clear tx enable */
		break;
	case XAUIC:
		xaui_cfg = nlm_read_nae_reg(nae_base,
		    XAUI_CONFIG1(nblock));
		nlm_write_nae_reg(nae_base,
		    XAUI_CONFIG1(nblock),
		    xaui_cfg &
		    ~(XAUI_CONFIG_TFEN |
		    XAUI_CONFIG_RFEN));
		break;
	case ILC:
		/* Interlaken: nothing to do here */
		break;
	}
}

/*
 * Set IOR credits for the ports in ifmask to valmask
 */
static void
nlm_nae_set_ior_credit(uint64_t nae_base, uint32_t ifmask, uint32_t valmask)
{
	uint32_t tx_config, tx_ior_credit;

	tx_ior_credit = nlm_read_nae_reg(nae_base, NAE_TX_IORCRDT_INIT);
	tx_ior_credit &= ~ifmask;
	tx_ior_credit |= valmask;
	nlm_write_nae_reg(nae_base, NAE_TX_IORCRDT_INIT, tx_ior_credit);

	tx_config = nlm_read_nae_reg(nae_base, NAE_TX_CONFIG);
	/* need to toggle these bits for credits to be loaded */
	nlm_write_nae_reg(nae_base, NAE_TX_CONFIG,
	    tx_config | (TXINITIORCR(ifmask)));
	nlm_write_nae_reg(nae_base, NAE_TX_CONFIG,
	    tx_config & ~(TXINITIORCR(ifmask)));
}

/*
 * Bring up one NAE interface (port) and initialize the ingress and
 * egress paths.
 *
 * port_type selects XAUIC / ILC / SGMIIC handling; desc_size is passed
 * through to nlm_nae_init_ingress().  For SGMII this performs the full
 * bring-up sequence: IOR credits, PCS phy-id setup, soft reset of the
 * GMAC, default 1G MAC configuration, stats counter reset/enable and
 * flow control.  Always returns 0.
 */
int
nlm_nae_open_if(uint64_t nae_base, int nblock, int port_type,
    int port, uint32_t desc_size)
{
	uint32_t netwk_inf;
	uint32_t mac_cfg1, netior_ctrl3;
	int iface, iface_ctrl_reg, iface_ctrl3_reg, conf1_reg, conf2_reg;

	switch (port_type) {
	case XAUIC:
		/* pulse the XGMAC stats-clear bit before enabling credits */
		netwk_inf = nlm_read_nae_reg(nae_base,
		    XAUI_NETIOR_XGMAC_CTRL1(nblock));
		netwk_inf |= (1 << NETIOR_XGMAC_STATS_CLR_POS);
		nlm_write_nae_reg(nae_base,
		    XAUI_NETIOR_XGMAC_CTRL1(nblock), netwk_inf);

		/* XAUI spans four lanes, hence the 4-bit credit mask */
		nlm_nae_set_ior_credit(nae_base, 0xf << port, 0xf << port);
		break;

	case ILC:
		/* Interlaken spans eight lanes */
		nlm_nae_set_ior_credit(nae_base, 0xff << port, 0xff << port);
		break;

	case SGMIIC:
		nlm_nae_set_ior_credit(nae_base, 0x1 << port, 0);

		/*
		 * XXXJC: split this and merge to sgmii.c
		 * some of this is duplicated from there.
		 */
		/* init phy id to access internal PCS */
		iface = port & 0x3;
		iface_ctrl_reg = SGMII_NET_IFACE_CTRL(nblock, iface);
		conf1_reg = SGMII_MAC_CONF1(nblock, iface);
		conf2_reg = SGMII_MAC_CONF2(nblock, iface);

		/* phy id lives in bits 31:27 of the iface control reg */
		netwk_inf = nlm_read_nae_reg(nae_base, iface_ctrl_reg);
		netwk_inf &= 0x7ffffff;
		netwk_inf |= (port << 27);
		nlm_write_nae_reg(nae_base, iface_ctrl_reg, netwk_inf);

		/* Soft reset sgmii port - set bit 11 to 0 */
		netwk_inf &= 0xfffff7ff;
		nlm_write_nae_reg(nae_base, iface_ctrl_reg, netwk_inf);

		/* Reset Gmac */
		mac_cfg1 = nlm_read_nae_reg(nae_base, conf1_reg);
		nlm_write_nae_reg(nae_base, conf1_reg,
		    mac_cfg1 |
		    (1U << 31) |	/* soft reset */
		    (1 << 2) |		/* rx enable */
		    (1));		/* tx enable */

		/* default to 1G */
		nlm_write_nae_reg(nae_base,
		    conf2_reg,
		    (0x7 << 12) |	/* interface preamble length */
		    (0x2 << 8) |	/* interface mode */
		    (0x1 << 2) |	/* pad crc enable */
		    (0x1));		/* full duplex */

		/* clear gmac reset */
		mac_cfg1 = nlm_read_nae_reg(nae_base, conf1_reg);
		nlm_write_nae_reg(nae_base, conf1_reg, mac_cfg1 & ~(1U << 31));

		/* clear speed debug bit */
		iface_ctrl3_reg = SGMII_NET_IFACE_CTRL3(nblock, iface);
		netior_ctrl3 = nlm_read_nae_reg(nae_base, iface_ctrl3_reg);
		nlm_write_nae_reg(nae_base, iface_ctrl3_reg,
		    netior_ctrl3 & ~(1 << 6));

		/* disable TX, RX for now */
		mac_cfg1 = nlm_read_nae_reg(nae_base, conf1_reg);
		nlm_write_nae_reg(nae_base, conf1_reg, mac_cfg1 & ~(0x5));
		netwk_inf = nlm_read_nae_reg(nae_base, iface_ctrl_reg);
		nlm_write_nae_reg(nae_base, iface_ctrl_reg,
		    netwk_inf & ~(0x1 << 2));

		/* clear stats counters (bit 15 = stats clear) */
		netwk_inf = nlm_read_nae_reg(nae_base, iface_ctrl_reg);
		nlm_write_nae_reg(nae_base, iface_ctrl_reg,
		    netwk_inf | (1 << 15));

		/* enable stats counters (clear bit 15, set bit 16) */
		netwk_inf = nlm_read_nae_reg(nae_base, iface_ctrl_reg);
		nlm_write_nae_reg(nae_base, iface_ctrl_reg,
		    (netwk_inf & ~(1 << 15)) | (1 << 16));

		/*
		 * flow control?
		 * NOTE(review): bits 5:4 of MAC_CONF1 — presumably the
		 * RX/TX flow-control enables; confirm against the XLP
		 * GMAC register documentation.
		 */
		mac_cfg1 = nlm_read_nae_reg(nae_base, conf1_reg);
		nlm_write_nae_reg(nae_base, conf1_reg,
		    mac_cfg1 | (0x3 << 4));
		break;
	}

	nlm_nae_init_ingress(nae_base, desc_size);
	nlm_nae_init_egress(nae_base);

	return (0);
}