cvmx-helper.c revision 232816
1/***********************license start*************** 2 * Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights 3 * reserved. 4 * 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * * Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 13 * * Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 18 * * Neither the name of Cavium Inc. nor the names of 19 * its contributors may be used to endorse or promote products 20 * derived from this software without specific prior written 21 * permission. 22 23 * This Software, including technical data, may be subject to U.S. export control 24 * laws, including the U.S. Export Administration Act and its associated 25 * regulations, and may be subject to export or import regulations in other 26 * countries. 27 28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" 29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR 30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO 31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR 32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM 33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, 34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF 35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR 36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR 37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 
38 ***********************license end**************************************/ 39 40 41 42 43 44 45 46/** 47 * @file 48 * 49 * Helper functions for common, but complicated tasks. 50 * 51 * <hr>$Revision: 70030 $<hr> 52 */ 53#ifdef CVMX_BUILD_FOR_LINUX_KERNEL 54#include <linux/module.h> 55#include <asm/octeon/cvmx.h> 56#include <asm/octeon/cvmx-config.h> 57#include <asm/octeon/cvmx-bootmem.h> 58#include <asm/octeon/cvmx-sriox-defs.h> 59#include <asm/octeon/cvmx-npi-defs.h> 60#include <asm/octeon/cvmx-mio-defs.h> 61#include <asm/octeon/cvmx-pexp-defs.h> 62#include <asm/octeon/cvmx-pip-defs.h> 63#include <asm/octeon/cvmx-asxx-defs.h> 64#include <asm/octeon/cvmx-gmxx-defs.h> 65#include <asm/octeon/cvmx-smix-defs.h> 66#include <asm/octeon/cvmx-dbg-defs.h> 67#include <asm/octeon/cvmx-sso-defs.h> 68 69#include <asm/octeon/cvmx-gmx.h> 70#include <asm/octeon/cvmx-fpa.h> 71#include <asm/octeon/cvmx-pip.h> 72#include <asm/octeon/cvmx-pko.h> 73#include <asm/octeon/cvmx-ipd.h> 74#include <asm/octeon/cvmx-spi.h> 75#include <asm/octeon/cvmx-clock.h> 76#include <asm/octeon/cvmx-helper.h> 77#include <asm/octeon/cvmx-helper-board.h> 78#include <asm/octeon/cvmx-helper-errata.h> 79#include <asm/octeon/cvmx-helper-cfg.h> 80#else 81#if !defined(__FreeBSD__) || !defined(_KERNEL) 82#include "executive-config.h" 83#endif 84#include "cvmx.h" 85#include "cvmx-sysinfo.h" 86#include "cvmx-bootmem.h" 87#include "cvmx-version.h" 88#include "cvmx-helper-check-defines.h" 89#include "cvmx-gmx.h" 90#if !defined(__FreeBSD__) || !defined(_KERNEL) 91#include "cvmx-error.h" 92#include "cvmx-config.h" 93#endif 94 95#include "cvmx-fpa.h" 96#include "cvmx-pip.h" 97#include "cvmx-pko.h" 98#include "cvmx-ipd.h" 99#include "cvmx-spi.h" 100#include "cvmx-helper.h" 101#include "cvmx-helper-board.h" 102#include "cvmx-helper-errata.h" 103#include "cvmx-helper-cfg.h" 104#endif 105 106 107#ifdef CVMX_ENABLE_PKO_FUNCTIONS 108 109/** 110 * cvmx_override_pko_queue_priority(int pko_port, uint64_t 111 * priorities[16]) is 
a function pointer. It is meant to allow
 * customization of the PKO queue priorities based on the port
 * number. Users should set this pointer to a function before
 * calling any cvmx-helper operations.
 */
/* Optional user hook: invoked per IPD port so applications can replace the
   default 16-entry PKO queue priority vector before queues are configured. */
CVMX_SHARED void (*cvmx_override_pko_queue_priority)(int ipd_port,
                                                     uint64_t *priorities) = NULL;
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_override_pko_queue_priority);
#endif

/**
 * cvmx_override_ipd_port_setup(int ipd_port) is a function
 * pointer. It is meant to allow customization of the IPD
 * port/port kind setup before packet input/output comes online.
 * It is called after cvmx-helper does the default IPD configuration,
 * but before IPD is enabled. Users should set this pointer to a
 * function before calling any cvmx-helper operations.
 */
CVMX_SHARED void (*cvmx_override_ipd_port_setup)(int ipd_port) = NULL;

/**
 * Return the number of interfaces the chip has. Each interface
 * may have multiple ports. Most chips support two interfaces,
 * but the CNX0XX and CNX1XX are exceptions. These only support
 * one interface.
137 * 138 * @return Number of interfaces on chip 139 */ 140int cvmx_helper_get_number_of_interfaces(void) 141{ 142 switch (cvmx_sysinfo_get()->board_type) { 143#if defined(OCTEON_VENDOR_LANNER) 144 case CVMX_BOARD_TYPE_CUST_LANNER_MR955: 145 return 2; 146 case CVMX_BOARD_TYPE_CUST_LANNER_MR730: 147 return 1; 148#endif 149 default: 150 break; 151 } 152 153 if (OCTEON_IS_MODEL(OCTEON_CN68XX)) 154 return 9; 155 else if (OCTEON_IS_MODEL(OCTEON_CN66XX)) 156 if (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0)) 157 return 7; 158 else 159 return 8; 160 else if (OCTEON_IS_MODEL(OCTEON_CN63XX)) 161 return 6; 162 else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN61XX)) 163 return 4; 164 else 165 return 3; 166} 167#ifdef CVMX_BUILD_FOR_LINUX_KERNEL 168EXPORT_SYMBOL(cvmx_helper_get_number_of_interfaces); 169#endif 170 171 172/** 173 * Return the number of ports on an interface. Depending on the 174 * chip and configuration, this can be 1-16. A value of 0 175 * specifies that the interface doesn't exist or isn't usable. 176 * 177 * @param interface Interface to get the port count for 178 * 179 * @return Number of ports on interface. Can be Zero. 180 */ 181int cvmx_helper_ports_on_interface(int interface) 182{ 183 if (octeon_has_feature(OCTEON_FEATURE_PKND)) 184 return cvmx_helper_interface_enumerate(interface); 185 else 186 return __cvmx_helper_get_num_ipd_ports(interface); 187} 188#ifdef CVMX_BUILD_FOR_LINUX_KERNEL 189EXPORT_SYMBOL(cvmx_helper_ports_on_interface); 190#endif 191 192 193/** 194 * Get the operating mode of an interface. Depending on the Octeon 195 * chip and configuration, this function returns an enumeration 196 * of the type of packet I/O supported by an interface. 197 * 198 * @param interface Interface to probe 199 * 200 * @return Mode of the interface. Unknown or unsupported interfaces return 201 * DISABLED. 
 */
cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
{
    cvmx_gmxx_inf_mode_t mode;

    /* CN68XX: the interface mode is derived entirely from the QLM (SerDes
       lane group) configuration registers. */
    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
    {
        cvmx_mio_qlmx_cfg_t qlm_cfg;
        switch(interface)
        {
            case 0:
                qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
                /* QLM is disabled when QLM SPD is 15. */
                if (qlm_cfg.s.qlm_spd == 15)
                    return CVMX_HELPER_INTERFACE_MODE_DISABLED;

                if (qlm_cfg.s.qlm_cfg == 7)
                    return CVMX_HELPER_INTERFACE_MODE_RXAUI;
                else if (qlm_cfg.s.qlm_cfg == 2)
                    return CVMX_HELPER_INTERFACE_MODE_SGMII;
                else if (qlm_cfg.s.qlm_cfg == 3)
                    return CVMX_HELPER_INTERFACE_MODE_XAUI;
                else
                    return CVMX_HELPER_INTERFACE_MODE_DISABLED;
                break;
            case 1:
                /* Interface 1 shares QLM 0 with interface 0; only the RXAUI
                   split configuration (qlm_cfg == 7) gives it ports. */
                qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
                /* QLM is disabled when QLM SPD is 15. */
                if (qlm_cfg.s.qlm_spd == 15)
                    return CVMX_HELPER_INTERFACE_MODE_DISABLED;

                if (qlm_cfg.s.qlm_cfg == 7)
                    return CVMX_HELPER_INTERFACE_MODE_RXAUI;
                else
                    return CVMX_HELPER_INTERFACE_MODE_DISABLED;
                break;
            case 2:
            case 3:
            case 4:
                /* Interfaces 2-4 map one-to-one onto QLMs 2-4. */
                qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(interface));
                /* QLM is disabled when QLM SPD is 15. */
                if (qlm_cfg.s.qlm_spd == 15)
                    return CVMX_HELPER_INTERFACE_MODE_DISABLED;

                if (qlm_cfg.s.qlm_cfg == 2)
                    return CVMX_HELPER_INTERFACE_MODE_SGMII;
                else if (qlm_cfg.s.qlm_cfg == 3)
                    return CVMX_HELPER_INTERFACE_MODE_XAUI;
                else
                    return CVMX_HELPER_INTERFACE_MODE_DISABLED;
                break;
            case 5:
            case 6:
                /* ILK interfaces 5 and 6 map onto QLMs 1 and 2. */
                qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(interface - 4));
                /* QLM is disabled when QLM SPD is 15. */
                if (qlm_cfg.s.qlm_spd == 15)
                    return CVMX_HELPER_INTERFACE_MODE_DISABLED;

                if (qlm_cfg.s.qlm_cfg == 1)
                {
                    return CVMX_HELPER_INTERFACE_MODE_ILK;
                }
                else
                    return CVMX_HELPER_INTERFACE_MODE_DISABLED;
                break;
            case 7:
                /* NPI: usable only when neither QLM 3 nor QLM 1 has claimed
                   the lanes for another protocol. */
                qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(3));
                /* QLM is disabled when QLM SPD is 15. */
                if (qlm_cfg.s.qlm_spd == 15)
                    return CVMX_HELPER_INTERFACE_MODE_DISABLED;
                else if (qlm_cfg.s.qlm_cfg != 0)
                {
                    qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
                    if (qlm_cfg.s.qlm_cfg != 0)
                        return CVMX_HELPER_INTERFACE_MODE_DISABLED;
                }
                return CVMX_HELPER_INTERFACE_MODE_NPI;
                break;
            case 8:
                return CVMX_HELPER_INTERFACE_MODE_LOOP;
                break;
            default:
                return CVMX_HELPER_INTERFACE_MODE_DISABLED;
                break;
        }
    }

    /* Non-CN68XX chips: interface 2 is always the PCI/NPI packet engine. */
    if (interface == 2)
        return CVMX_HELPER_INTERFACE_MODE_NPI;

    if (interface == 3)
    {
        if (OCTEON_IS_MODEL(OCTEON_CN56XX)
            || OCTEON_IS_MODEL(OCTEON_CN52XX)
            || OCTEON_IS_MODEL(OCTEON_CN6XXX)
            || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
            return CVMX_HELPER_INTERFACE_MODE_LOOP;
        else
            return CVMX_HELPER_INTERFACE_MODE_DISABLED;
    }

    /* Only present in CN63XX & CN66XX Octeon model */
    if ((OCTEON_IS_MODEL(OCTEON_CN63XX) && (interface == 4 || interface == 5))
        || (OCTEON_IS_MODEL(OCTEON_CN66XX) && interface >= 4 && interface <= 7))
    {
        cvmx_sriox_status_reg_t sriox_status_reg;

        /* cn66xx pass1.0 has only 2 SRIO interfaces. */
        if ((interface == 5 || interface == 7) && OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
            return CVMX_HELPER_INTERFACE_MODE_DISABLED;

        sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(interface-4));
        if (sriox_status_reg.s.srio)
            return CVMX_HELPER_INTERFACE_MODE_SRIO;
        else
            return CVMX_HELPER_INTERFACE_MODE_DISABLED;
    }

    /* Interface 5 always disabled in CN66XX */
    if (OCTEON_IS_MODEL(OCTEON_CN66XX))
    {
        cvmx_mio_qlmx_cfg_t mio_qlm_cfg;

        /* QLM2 is SGMII0 and QLM1 is SGMII1 */
        if (interface == 0)
            mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
        else if (interface == 1)
            mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
        else
            return CVMX_HELPER_INTERFACE_MODE_DISABLED;

        if (mio_qlm_cfg.s.qlm_spd == 15)
            return CVMX_HELPER_INTERFACE_MODE_DISABLED;

        if (mio_qlm_cfg.s.qlm_cfg == 9)
            return CVMX_HELPER_INTERFACE_MODE_SGMII;
        else if (mio_qlm_cfg.s.qlm_cfg == 11)
            return CVMX_HELPER_INTERFACE_MODE_XAUI;
        else
            return CVMX_HELPER_INTERFACE_MODE_DISABLED;
    }
    else if (OCTEON_IS_MODEL(OCTEON_CN61XX))
    {
        cvmx_mio_qlmx_cfg_t qlm_cfg;

        if (interface == 0)
        {
            qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
            if (qlm_cfg.s.qlm_cfg == 2)
                return CVMX_HELPER_INTERFACE_MODE_SGMII;
            else if (qlm_cfg.s.qlm_cfg == 3)
                return CVMX_HELPER_INTERFACE_MODE_XAUI;
            else
                return CVMX_HELPER_INTERFACE_MODE_DISABLED;
        }
        else if (interface == 1)
        {
            /* If QLM 1 is PEV0/PEM1 mode, them QLM0 cannot be SGMII/XAUI */
            qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
            if (qlm_cfg.s.qlm_cfg == 1)
                return CVMX_HELPER_INTERFACE_MODE_DISABLED;

            qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
            if (qlm_cfg.s.qlm_cfg == 2)
                return CVMX_HELPER_INTERFACE_MODE_SGMII;
            else if (qlm_cfg.s.qlm_cfg == 3)
                return CVMX_HELPER_INTERFACE_MODE_XAUI;
            else
                return CVMX_HELPER_INTERFACE_MODE_DISABLED;
        }
    }

    if (interface == 0 && cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5 && cvmx_sysinfo_get()->board_rev_major == 1)
    {
        /* Lie about interface type of CN3005 board. This board has a switch on port 1 like
        ** the other evaluation boards, but it is connected over RGMII instead of GMII. Report
        ** GMII mode so that the speed is forced to 1 Gbit full duplex. Other than some initial configuration
        ** (which does not use the output of this function) there is no difference in setup between GMII and RGMII modes.
        */
        return CVMX_HELPER_INTERFACE_MODE_GMII;
    }

    /* Interface 1 is always disabled on CN31XX and CN30XX */
    if ((interface == 1)
        && (OCTEON_IS_MODEL(OCTEON_CN31XX)
            || OCTEON_IS_MODEL(OCTEON_CN30XX)
            || OCTEON_IS_MODEL(OCTEON_CN50XX)
            || OCTEON_IS_MODEL(OCTEON_CN52XX)
            || OCTEON_IS_MODEL(OCTEON_CN63XX)
            || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
        return CVMX_HELPER_INTERFACE_MODE_DISABLED;

    /* Fall back to the GMX interface-mode register for everything else. */
    mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

    if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
    {
        switch(mode.cn56xx.mode)
        {
            case 0: return CVMX_HELPER_INTERFACE_MODE_DISABLED;
            case 1: return CVMX_HELPER_INTERFACE_MODE_XAUI;
            case 2: return CVMX_HELPER_INTERFACE_MODE_SGMII;
            case 3: return CVMX_HELPER_INTERFACE_MODE_PICMG;
            default:return CVMX_HELPER_INTERFACE_MODE_DISABLED;
        }
    }
    else if (OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
    {
        switch(mode.cn63xx.mode)
        {
            case 0: return CVMX_HELPER_INTERFACE_MODE_SGMII;
            case 1: return CVMX_HELPER_INTERFACE_MODE_XAUI;
            default: return CVMX_HELPER_INTERFACE_MODE_DISABLED;
        }
    }
    else
    {
        if (!mode.s.en)
            return CVMX_HELPER_INTERFACE_MODE_DISABLED;

        if (mode.s.type)
        {
            if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
                return CVMX_HELPER_INTERFACE_MODE_SPI;
            else
                return CVMX_HELPER_INTERFACE_MODE_GMII;
        }
        else
            return CVMX_HELPER_INTERFACE_MODE_RGMII;
    }
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_helper_interface_get_mode);
#endif

/**
 * @INTERNAL
 * Configure the IPD/PIP tagging and QoS options for a specific
 * port. This function determines the POW work queue entry
 * contents for a port. The setup performed here is controlled by
 * the defines in executive-config.h.
 *
 * @param ipd_port Port/Port kind to configure. This follows the IPD numbering,
 *                 not the per interface numbering
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_port_setup_ipd(int ipd_port)
{
    cvmx_pip_prt_cfgx_t port_config;
    cvmx_pip_prt_tagx_t tag_config;

    if (octeon_has_feature(OCTEON_FEATURE_PKND))
    {
        /* Port-kind chips index PIP configuration by pknd, not IPD port. */
        int interface, index, pknd;
        cvmx_pip_prt_cfgbx_t prt_cfgbx;

        interface = cvmx_helper_get_interface_num(ipd_port);
        index = cvmx_helper_get_interface_index_num(ipd_port);
        pknd = cvmx_helper_get_pknd(interface, index);

        port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(pknd));
        tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(pknd));

        /* Spread port-kinds across the 8 QoS levels. */
        port_config.s.qos = pknd & 0x7;

        /* Default BPID to use for packets on this port-kind */
        prt_cfgbx.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGBX(pknd));
        prt_cfgbx.s.bpid = pknd;
        cvmx_write_csr(CVMX_PIP_PRT_CFGBX(pknd), prt_cfgbx.u64);
    }
    else
    {
        port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
        tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(ipd_port));

        /* Have each port go to a different POW queue */
        port_config.s.qos = ipd_port & 0x7;
    }

    /* Process the headers and place the IP header in the work queue */
    port_config.s.mode = CVMX_HELPER_INPUT_PORT_SKIP_MODE;

    /* Select which packet fields contribute to the POW tag; all knobs come
       from executive-config.h. */
    tag_config.s.ip6_src_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP;
    tag_config.s.ip6_dst_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_IP;
    tag_config.s.ip6_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT;
    tag_config.s.ip6_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT;
    tag_config.s.ip6_nxth_flag = CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER;
    tag_config.s.ip4_src_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP;
    tag_config.s.ip4_dst_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_IP;
    tag_config.s.ip4_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT;
    tag_config.s.ip4_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT;
    tag_config.s.ip4_pctl_flag = CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL;
    tag_config.s.inc_prt_flag = CVMX_HELPER_INPUT_TAG_INPUT_PORT;
    tag_config.s.tcp6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
    tag_config.s.tcp4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
    tag_config.s.ip6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
    tag_config.s.ip4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
    tag_config.s.non_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
    /* Put all packets in group 0. Other groups can be used by the app */
    tag_config.s.grp = 0;

    cvmx_pip_config_port(ipd_port, port_config, tag_config);

    /* Give the user a chance to override our setting for each port */
    if (cvmx_override_ipd_port_setup)
        cvmx_override_ipd_port_setup(ipd_port);

    return 0;
}

/**
 * Enable or disable FCS stripping for all the ports on an interface.
 *
 * @param interface
 * @param nports number of ports
 * @param has_fcs 0 for disable and !0 for enable
 *
 * @return Zero on success (also when the chip has no port-kind feature and
 *         the call is a no-op).
 */
static int cvmx_helper_fcs_op(int interface, int nports, int has_fcs)
{
    uint64_t port_bit;
    int index;
    int pknd;
    cvmx_pip_sub_pkind_fcsx_t pkind_fcsx;
    cvmx_pip_prt_cfgx_t port_cfg;

    /* FCS control by port-kind only exists on PKND-capable chips. */
    if (!octeon_has_feature(OCTEON_FEATURE_PKND))
        return 0;

    /* Build a bitmask of the pknds belonging to this interface. */
    port_bit = 0;
    for (index = 0; index < nports; index++)
        port_bit |= ((uint64_t)1 << cvmx_helper_get_pknd(interface, index));

    pkind_fcsx.u64 = cvmx_read_csr(CVMX_PIP_SUB_PKIND_FCSX(0));
    if (has_fcs)
        pkind_fcsx.s.port_bit |= port_bit;
    else
        pkind_fcsx.s.port_bit &= ~port_bit;
    cvmx_write_csr(CVMX_PIP_SUB_PKIND_FCSX(0), pkind_fcsx.u64);

    /* Keep per-pknd CRC checking in sync with FCS stripping. */
    for (pknd = 0; pknd < 64; pknd++)
    {
        if ((1ull << pknd) & port_bit)
        {
            port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(pknd));
            port_cfg.s.crc_en = (has_fcs) ? 1 : 0;
            cvmx_write_csr(CVMX_PIP_PRT_CFGX(pknd), port_cfg.u64);
        }
    }

    return 0;
}

/**
 * Determine the actual number of hardware ports connected to an
 * interface. It doesn't setup the ports or enable them.
 *
 * @param interface Interface to enumerate
 *
 * @return The number of ports on the interface, negative on failure
 */
int cvmx_helper_interface_enumerate(int interface)
{
    switch (cvmx_helper_interface_get_mode(interface)) {
        /* XAUI is a single high speed port */
    case CVMX_HELPER_INTERFACE_MODE_XAUI:
    case CVMX_HELPER_INTERFACE_MODE_RXAUI:
        return __cvmx_helper_xaui_enumerate(interface);
        /* RGMII/GMII/MII are all treated about the same. Most functions
           refer to these ports as RGMII */
    case CVMX_HELPER_INTERFACE_MODE_RGMII:
    case CVMX_HELPER_INTERFACE_MODE_GMII:
        return __cvmx_helper_rgmii_enumerate(interface);
        /* SPI4 can have 1-16 ports depending on the device at the other end */
    case CVMX_HELPER_INTERFACE_MODE_SPI:
        return __cvmx_helper_spi_enumerate(interface);
        /* SGMII can have 1-4 ports depending on how many are hooked up */
    case CVMX_HELPER_INTERFACE_MODE_SGMII:
    case CVMX_HELPER_INTERFACE_MODE_PICMG:
        return __cvmx_helper_sgmii_enumerate(interface);
        /* PCI target Network Packet Interface */
    case CVMX_HELPER_INTERFACE_MODE_NPI:
        return __cvmx_helper_npi_enumerate(interface);
        /* Special loopback only ports. These are not the same
         * as other ports in loopback mode */
    case CVMX_HELPER_INTERFACE_MODE_LOOP:
        return __cvmx_helper_loop_enumerate(interface);
        /* SRIO has 2^N ports, where N is number of interfaces */
    case CVMX_HELPER_INTERFACE_MODE_SRIO:
        return __cvmx_helper_srio_enumerate(interface);

    case CVMX_HELPER_INTERFACE_MODE_ILK:
        return __cvmx_helper_ilk_enumerate(interface);
        /* These types don't support ports to IPD/PKO */
    case CVMX_HELPER_INTERFACE_MODE_DISABLED:
    case CVMX_HELPER_INTERFACE_MODE_PCIE:
    default:
        return 0;
    }
}

/**
 * This function probes an interface to determine the actual number of
 * hardware ports connected to it. It does some setup the ports but
 * doesn't enable them. The main goal here is to set the global
 * interface_port_count[interface] correctly. Final hardware setup of
 * the ports will be performed later.
 *
 * @param interface Interface to probe
 *
 * @return Zero on success, negative on failure
 */
int cvmx_helper_interface_probe(int interface)
{
    /* At this stage in the game we don't want packets to be moving yet.
       The following probe calls should perform hardware setup
       needed to determine port counts. Receive must still be disabled */
    int nports;
    int has_fcs;
    enum cvmx_pko_padding padding = CVMX_PKO_PADDING_NONE;

    /* nports stays -1 only if the switch below has no matching case; that
       is treated as a probe failure. */
    nports = -1;
    has_fcs = 0;
    switch (cvmx_helper_interface_get_mode(interface))
    {
        /* These types don't support ports to IPD/PKO */
        case CVMX_HELPER_INTERFACE_MODE_DISABLED:
        case CVMX_HELPER_INTERFACE_MODE_PCIE:
            nports = 0;
            break;
        /* XAUI is a single high speed port */
        case CVMX_HELPER_INTERFACE_MODE_XAUI:
        case CVMX_HELPER_INTERFACE_MODE_RXAUI:
            nports = __cvmx_helper_xaui_probe(interface);
            has_fcs = 1;
            padding = CVMX_PKO_PADDING_60;
            break;
        /* RGMII/GMII/MII are all treated about the same. Most functions
           refer to these ports as RGMII */
        case CVMX_HELPER_INTERFACE_MODE_RGMII:
        case CVMX_HELPER_INTERFACE_MODE_GMII:
            nports = __cvmx_helper_rgmii_probe(interface);
            padding = CVMX_PKO_PADDING_60;
            break;
        /* SPI4 can have 1-16 ports depending on the device at the other end */
        case CVMX_HELPER_INTERFACE_MODE_SPI:
            nports = __cvmx_helper_spi_probe(interface);
            padding = CVMX_PKO_PADDING_60;
            break;
        /* SGMII can have 1-4 ports depending on how many are hooked up */
        case CVMX_HELPER_INTERFACE_MODE_SGMII:
            padding = CVMX_PKO_PADDING_60;
            /* fallthrough: PICMG shares the SGMII probe but not the padding */
        case CVMX_HELPER_INTERFACE_MODE_PICMG:
            nports = __cvmx_helper_sgmii_probe(interface);
            has_fcs = 1;
            break;
        /* PCI target Network Packet Interface */
        case CVMX_HELPER_INTERFACE_MODE_NPI:
            nports = __cvmx_helper_npi_probe(interface);
            break;
        /* Special loopback only ports. These are not the same as other ports
           in loopback mode */
        case CVMX_HELPER_INTERFACE_MODE_LOOP:
            nports = __cvmx_helper_loop_probe(interface);
            break;
        /* SRIO has 2^N ports, where N is number of interfaces */
        case CVMX_HELPER_INTERFACE_MODE_SRIO:
            nports = __cvmx_helper_srio_probe(interface);
            break;
        case CVMX_HELPER_INTERFACE_MODE_ILK:
            nports = __cvmx_helper_ilk_probe(interface);
            has_fcs = 1;
            padding = CVMX_PKO_PADDING_60;
            break;
    }

    if (nports == -1)
        return -1;

    /* On non-PKND chips FCS handling is configured elsewhere. */
    if (!octeon_has_feature(OCTEON_FEATURE_PKND))
        has_fcs = 0;

    nports = __cvmx_helper_board_interface_probe(interface, nports);
    __cvmx_helper_init_interface(interface, nports, has_fcs, padding);
    cvmx_helper_fcs_op(interface, nports, has_fcs);

    /* Make sure all global variables propagate to other cores */
    CVMX_SYNCWS;

    return 0;
}


/**
 * @INTERNAL
 * Setup the IPD/PIP for the ports on an interface. Packet
 * classification and tagging are set for every port on the
 * interface. The number of ports on the interface must already
 * have been probed.
700 * 701 * @param interface Interface to setup IPD/PIP for 702 * 703 * @return Zero on success, negative on failure 704 */ 705static int __cvmx_helper_interface_setup_ipd(int interface) 706{ 707 708 cvmx_helper_interface_mode_t mode; 709 int ipd_port = cvmx_helper_get_ipd_port(interface, 0); 710 int num_ports = cvmx_helper_ports_on_interface(interface); 711 int delta; 712 713 if (num_ports == CVMX_HELPER_CFG_INVALID_VALUE) 714 return 0; 715 716 mode = cvmx_helper_interface_get_mode(interface); 717 718 if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) 719 __cvmx_helper_loop_enable(interface); 720 721 delta = 1; 722 if (octeon_has_feature(OCTEON_FEATURE_PKND)) 723 { 724 if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII) 725 delta = 16; 726 } 727 728 while (num_ports--) 729 { 730 __cvmx_helper_port_setup_ipd(ipd_port); 731 ipd_port += delta; 732 } 733 734 return 0; 735} 736 737 738/** 739 * @INTERNAL 740 * Setup global setting for IPD/PIP not related to a specific 741 * interface or port. This must be called before IPD is enabled. 742 * 743 * @return Zero on success, negative on failure. 744 */ 745static int __cvmx_helper_global_setup_ipd(void) 746{ 747#ifndef CVMX_HELPER_IPD_DRAM_MODE 748#define CVMX_HELPER_IPD_DRAM_MODE CVMX_IPD_OPC_MODE_STT 749#endif 750 /* Setup the global packet input options */ 751 cvmx_ipd_config(CVMX_FPA_PACKET_POOL_SIZE/8, 752 CVMX_HELPER_FIRST_MBUFF_SKIP/8, 753 CVMX_HELPER_NOT_FIRST_MBUFF_SKIP/8, 754 (CVMX_HELPER_FIRST_MBUFF_SKIP+8) / 128, /* The +8 is to account for the next ptr */ 755 (CVMX_HELPER_NOT_FIRST_MBUFF_SKIP+8) / 128, /* The +8 is to account for the next ptr */ 756 CVMX_FPA_WQE_POOL, 757 CVMX_HELPER_IPD_DRAM_MODE, 758 1); 759 return 0; 760} 761 762 763/** 764 * @INTERNAL 765 * Setup the PKO for the ports on an interface. The number of 766 * queues per port and the priority of each PKO output queue 767 * is set here. PKO must be disabled when this function is called. 
 *
 * @param interface Interface to setup PKO for
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_interface_setup_pko(int interface)
{
    /* Each packet output queue has an associated priority. The higher the
       priority, the more often it can send a packet. A priority of 8 means
       it can send in all 8 rounds of contention. We're going to make each
       queue one less than the last.
       The vector of priorities has been extended to support CN5xxx CPUs,
       where up to 16 queues can be associated to a port.
       To keep backward compatibility we don't change the initial 8
       priorities and replicate them in the second half.
       With per-core PKO queues (PKO lockless operation) all queues have
       the same priority. */
    /* uint64_t priorities[16] = {8,7,6,5,4,3,2,1,8,7,6,5,4,3,2,1}; */
    /* NOTE: GCC range-designator extension; every queue gets priority 8. */
    uint64_t priorities[16] = {[0 ... 15] = 8};

    /* Setup the IPD/PIP and PKO for the ports discovered above. Here packet
       classification, tagging and output priorities are set */
    int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
    int num_ports = cvmx_helper_ports_on_interface(interface);
    while (num_ports--)
    {
        /* Give the user a chance to override the per queue priorities */
        if (cvmx_override_pko_queue_priority)
            cvmx_override_pko_queue_priority(ipd_port, priorities);

        cvmx_pko_config_port(ipd_port, cvmx_pko_get_base_queue_per_core(ipd_port, 0),
                             cvmx_pko_get_num_queues(ipd_port), priorities);
        ipd_port++;
    }
    return 0;
}


/**
 * @INTERNAL
 * Setup global setting for PKO not related to a specific
 * interface or port. This must be called before PKO is enabled.
 *
 * @return Zero on success, negative on failure.
 */
static int __cvmx_helper_global_setup_pko(void)
{
    /* Disable tagwait FAU timeout. This needs to be done before anyone might
       start packet output using tags */
    cvmx_iob_fau_timeout_t fau_to;
    fau_to.u64 = 0;
    fau_to.s.tout_val = 0xfff;
    fau_to.s.tout_enb = 0;
    cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);

    if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
        /* Enforce a 60-byte (59 + implicit) minimum packet size on every
           PKO engine group. */
        cvmx_pko_reg_min_pkt_t min_pkt;

        min_pkt.u64 = 0;
        min_pkt.s.size1 = 59;
        min_pkt.s.size2 = 59;
        min_pkt.s.size3 = 59;
        min_pkt.s.size4 = 59;
        min_pkt.s.size5 = 59;
        min_pkt.s.size6 = 59;
        min_pkt.s.size7 = 59;
        cvmx_write_csr(CVMX_PKO_REG_MIN_PKT, min_pkt.u64);
    }

    return 0;
}


/**
 * @INTERNAL
 * Setup global backpressure setting.
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_global_setup_backpressure(void)
{
#if CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE
    /* Disable backpressure if configured to do so */
    /* Disable backpressure (pause frame) generation */
    int num_interfaces = cvmx_helper_get_number_of_interfaces();
    int interface;
    for (interface=0; interface<num_interfaces; interface++)
    {
        switch (cvmx_helper_interface_get_mode(interface))
        {
            case CVMX_HELPER_INTERFACE_MODE_DISABLED:
            case CVMX_HELPER_INTERFACE_MODE_PCIE:
            case CVMX_HELPER_INTERFACE_MODE_SRIO:
            case CVMX_HELPER_INTERFACE_MODE_ILK:
            case CVMX_HELPER_INTERFACE_MODE_NPI:
            case CVMX_HELPER_INTERFACE_MODE_LOOP:
            case CVMX_HELPER_INTERFACE_MODE_XAUI:
            case CVMX_HELPER_INTERFACE_MODE_RXAUI:
                break;
            case CVMX_HELPER_INTERFACE_MODE_RGMII:
            case CVMX_HELPER_INTERFACE_MODE_GMII:
            case CVMX_HELPER_INTERFACE_MODE_SPI:
            case CVMX_HELPER_INTERFACE_MODE_SGMII:
            case CVMX_HELPER_INTERFACE_MODE_PICMG:
                cvmx_gmx_set_backpressure_override(interface, 0xf);
                break;
        }
    }
    //cvmx_dprintf("Disabling backpressure\n");
#endif

    return 0;
}

/**
 * @INTERNAL
 * Verify the per port IPD backpressure is aligned
 properly.
 *
 * Works by artificially asserting backpressure on the first port of
 * interfaces 0 and 1 and checking, through the chip debug-select
 * registers, that the expected backpressure bit is the one that fires.
 *
 * @return Zero if working, non zero if misaligned
 */
static int __cvmx_helper_backpressure_is_misaligned(void)
{
    uint64_t ipd_int_enb;
    cvmx_ipd_ctl_status_t ipd_reg;
    uint64_t bp_status0;
    uint64_t bp_status1;
    const int port0 = 0;
    const int port1 = 16;
    cvmx_helper_interface_mode_t mode0 = cvmx_helper_interface_get_mode(0);
    cvmx_helper_interface_mode_t mode1 = cvmx_helper_interface_get_mode(1);

    /* Disable error interrupts while we check backpressure */
    ipd_int_enb = cvmx_read_csr(CVMX_IPD_INT_ENB);
    cvmx_write_csr(CVMX_IPD_INT_ENB, 0);

    /* Enable per port backpressure */
    ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
    ipd_reg.s.pbp_en = 1;
    cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);

    if (mode0 != CVMX_HELPER_INTERFACE_MODE_DISABLED)
    {
        /* Enable backpressure for port with a zero threshold */
        cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port0), 1<<17);
        /* Add 1000 to the page count to simulate packets coming in */
        cvmx_write_csr(CVMX_IPD_SUB_PORT_BP_PAGE_CNT, (port0<<25) | 1000);
    }

    if (mode1 != CVMX_HELPER_INTERFACE_MODE_DISABLED)
    {
        /* Enable backpressure for port with a zero threshold */
        cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port1), 1<<17);
        /* Add 1000 to the page count to simulate packets coming in */
        cvmx_write_csr(CVMX_IPD_SUB_PORT_BP_PAGE_CNT, (port1<<25) | 1000);
    }

    /* Wait 500 cycles for the BP to update */
    cvmx_wait(500);

    /* Read the BP state from the debug select register */
    switch (mode0)
    {
        case CVMX_HELPER_INTERFACE_MODE_SPI:
            /* SPI reports backpressure inverted; normalize to active-high. */
            cvmx_write_csr(CVMX_NPI_DBG_SELECT, 0x9004);
            bp_status0 = cvmx_read_csr(CVMX_DBG_DATA);
            bp_status0 = 0xffff & ~bp_status0;
            break;
        case CVMX_HELPER_INTERFACE_MODE_RGMII:
        case CVMX_HELPER_INTERFACE_MODE_GMII:
            cvmx_write_csr(CVMX_NPI_DBG_SELECT, 0x0e00);
            bp_status0 = 0xffff & cvmx_read_csr(CVMX_DBG_DATA);
            break;
        case CVMX_HELPER_INTERFACE_MODE_XAUI:
        case CVMX_HELPER_INTERFACE_MODE_SGMII:
        case CVMX_HELPER_INTERFACE_MODE_PICMG:
            cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, 0x0e00);
            bp_status0 = 0xffff & cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
            break;
        default:
            /* Modes without a debug view: fake the expected answer. */
            bp_status0 = 1<<port0;
            break;
    }

    /* Read the BP state from the debug select register */
    switch (mode1)
    {
        case CVMX_HELPER_INTERFACE_MODE_SPI:
            cvmx_write_csr(CVMX_NPI_DBG_SELECT, 0x9804);
            bp_status1 = cvmx_read_csr(CVMX_DBG_DATA);
            bp_status1 = 0xffff & ~bp_status1;
            break;
        case CVMX_HELPER_INTERFACE_MODE_RGMII:
        case CVMX_HELPER_INTERFACE_MODE_GMII:
            cvmx_write_csr(CVMX_NPI_DBG_SELECT, 0x1600);
            bp_status1 = 0xffff & cvmx_read_csr(CVMX_DBG_DATA);
            break;
        case CVMX_HELPER_INTERFACE_MODE_XAUI:
        case CVMX_HELPER_INTERFACE_MODE_SGMII:
        case CVMX_HELPER_INTERFACE_MODE_PICMG:
            cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, 0x1600);
            bp_status1 = 0xffff & cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
            break;
        default:
            bp_status1 = 1<<(port1-16);
            break;
    }

    if (mode0 != CVMX_HELPER_INTERFACE_MODE_DISABLED)
    {
        /* Shutdown BP: subtract the simulated packets and clear threshold. */
        cvmx_write_csr(CVMX_IPD_SUB_PORT_BP_PAGE_CNT, (port0<<25) | (0x1ffffff & -1000));
        cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port0), 0);
    }

    if (mode1 != CVMX_HELPER_INTERFACE_MODE_DISABLED)
    {
        /* Shutdown BP */
        cvmx_write_csr(CVMX_IPD_SUB_PORT_BP_PAGE_CNT, (port1<<25) | (0x1ffffff & -1000));
        cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port1), 0);
    }

    /* Clear any error interrupts that might have been set */
    cvmx_write_csr(CVMX_IPD_INT_SUM, 0x1f);
    cvmx_write_csr(CVMX_IPD_INT_ENB, ipd_int_enb);

    /* Misaligned if either interface's asserted bit is not the one we
       backpressured. */
    return ((bp_status0 != 1ull<<port0) || (bp_status1 != 1ull<<(port1-16)));
}


/**
 * @INTERNAL
 * Enable packet input/output from the hardware.
This function is
 * called after all internal setup is complete and IPD is enabled.
 * After this function completes, packets will be accepted from the
 * hardware ports. PKO should still be disabled to make sure packets
 * aren't sent out partially setup hardware.
 *
 * @param interface Interface to enable
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_packet_hardware_enable(int interface)
{
    int result = 0;
    /* Dispatch to the mode-specific enable helper for this interface */
    switch (cvmx_helper_interface_get_mode(interface))
    {
        /* These types don't support ports to IPD/PKO */
        case CVMX_HELPER_INTERFACE_MODE_DISABLED:
        case CVMX_HELPER_INTERFACE_MODE_PCIE:
        case CVMX_HELPER_INTERFACE_MODE_LOOP:
            /* Nothing to do */
            break;
        /* XAUI is a single high speed port */
        case CVMX_HELPER_INTERFACE_MODE_XAUI:
        case CVMX_HELPER_INTERFACE_MODE_RXAUI:
            result = __cvmx_helper_xaui_enable(interface);
            break;
        /* RGMII/GMII/MII are all treated about the same. Most functions
           refer to these ports as RGMII */
        case CVMX_HELPER_INTERFACE_MODE_RGMII:
        case CVMX_HELPER_INTERFACE_MODE_GMII:
            result = __cvmx_helper_rgmii_enable(interface);
            break;
        /* SPI4 can have 1-16 ports depending on the device at the other end */
        case CVMX_HELPER_INTERFACE_MODE_SPI:
            result = __cvmx_helper_spi_enable(interface);
            break;
        /* SGMII can have 1-4 ports depending on how many are hooked up */
        case CVMX_HELPER_INTERFACE_MODE_SGMII:
        case CVMX_HELPER_INTERFACE_MODE_PICMG:
            result = __cvmx_helper_sgmii_enable(interface);
            break;
        /* PCI target Network Packet Interface */
        case CVMX_HELPER_INTERFACE_MODE_NPI:
            result = __cvmx_helper_npi_enable(interface);
            break;
        /* SRIO has 2^N ports, where N is number of interfaces */
        case CVMX_HELPER_INTERFACE_MODE_SRIO:
            result = __cvmx_helper_srio_enable(interface);
            break;
        case CVMX_HELPER_INTERFACE_MODE_ILK:
            result = __cvmx_helper_ilk_enable(interface);
            break;
    }
    /* Board-level hooks (PHY setup, etc.) run for every mode */
    result |= __cvmx_helper_board_hardware_enable(interface);
    return result;
}


/**
 * Called after all internal packet IO paths are setup. This
 * function enables IPD/PIP and begins packet input and output.
 *
 * @return Zero on success, negative on failure
 */
int cvmx_helper_ipd_and_packet_input_enable(void)
{
    int num_interfaces;
    int interface;

    /* Enable IPD */
    cvmx_ipd_enable();

    /* Time to enable hardware ports packet input and output. Note that at this
       point IPD/PIP must be fully functional and PKO must be disabled */
    num_interfaces = cvmx_helper_get_number_of_interfaces();
    for (interface=0; interface<num_interfaces; interface++)
    {
        if (cvmx_helper_ports_on_interface(interface) > 0)
        {
            /* NOTE(review): the per-interface enable's return value is
               ignored here, so failures are not propagated to the caller */
            //cvmx_dprintf("Enabling packet I/O on interface %d\n", interface);
            __cvmx_helper_packet_hardware_enable(interface);
        }
    }

    /* Finally enable PKO now that the entire path is up and running */
    cvmx_pko_enable();

    /* Errata workaround for early CN30XX/CN31XX silicon (skip on simulator) */
    if ((OCTEON_IS_MODEL(OCTEON_CN31XX_PASS1) || OCTEON_IS_MODEL(OCTEON_CN30XX_PASS1)) &&
        (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM))
        __cvmx_helper_errata_fix_ipd_ptr_alignment();
    return 0;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_helper_ipd_and_packet_input_enable);
#endif

/* Size in bytes of one SSO re-write queue (RWQ) buffer */
#define __CVMX_SSO_RWQ_SIZE 256

int cvmx_helper_initialize_sso(int wqe_entries)
{
    int cvm_oct_sso_number_rwq_bufs;
    char *mem;
    int i;
    cvmx_sso_cfg_t sso_cfg;
    cvmx_fpa_fpfx_marks_t fpa_marks;

    /* The SSO RWQ mechanism only exists on CN68XX */
    if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
        return 0;

    /*
     * CN68XX-P1 may reset with the wrong values, put in
     * the correct values.
     */
    fpa_marks.u64 = 0;
    fpa_marks.s.fpf_wr = 0xa4;
    fpa_marks.s.fpf_rd = 0x40;
    cvmx_write_csr(CVMX_FPA_FPF8_MARKS, fpa_marks.u64);

    /* One buffer per 26 WQEs (rounded up), plus 48 spare and 8 used below to
       seed the per-index head/tail rings. TODO(review): confirm the
       26-entries-per-buffer packing against the SSO documentation. */
    cvm_oct_sso_number_rwq_bufs = ((wqe_entries - 1) / 26) + 1 + 48 + 8;

    mem = cvmx_bootmem_alloc(__CVMX_SSO_RWQ_SIZE * cvm_oct_sso_number_rwq_bufs, CVMX_CACHE_LINE_SIZE);
    if (mem == NULL) {
        cvmx_dprintf("Out of memory initializing sso pool\n");
        return -1;
    }
    /* Make sure RWI/RWO is disabled.
 */
    sso_cfg.u64 = cvmx_read_csr(CVMX_SSO_CFG);
    sso_cfg.s.rwen = 0;
    cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);

    /* Push every buffer except the last 8 onto the RWQ free list,
       waiting whenever the push FIFO reports full */
    for (i = cvm_oct_sso_number_rwq_bufs - 8; i > 0; i--) {
        cvmx_sso_rwq_psh_fptr_t fptr;

        for (;;) {
            fptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_PSH_FPTR);
            if (!fptr.s.full)
                break;
            cvmx_wait(1000);
        }
        /* Hardware stores physical addresses in 128-byte units (>> 7) */
        fptr.s.fptr = cvmx_ptr_to_phys(mem) >> 7;
        cvmx_write_csr(CVMX_SSO_RWQ_PSH_FPTR, fptr.u64);
        mem = mem + __CVMX_SSO_RWQ_SIZE;
    }

    /* Seed each of the 8 head/tail pointer pairs with its own buffer
       (head == tail means the ring is empty) */
    for (i = 0; i < 8; i++) {
        cvmx_sso_rwq_head_ptrx_t head_ptr;
        cvmx_sso_rwq_tail_ptrx_t tail_ptr;

        head_ptr.u64 = 0;
        tail_ptr.u64 = 0;
        head_ptr.s.ptr = cvmx_ptr_to_phys(mem) >> 7;
        tail_ptr.s.ptr = head_ptr.s.ptr;
        cvmx_write_csr(CVMX_SSO_RWQ_HEAD_PTRX(i), head_ptr.u64);
        cvmx_write_csr(CVMX_SSO_RWQ_TAIL_PTRX(i), tail_ptr.u64);
        mem = mem + __CVMX_SSO_RWQ_SIZE;
    }

    /* Re-enable RWI/RWO with bypasses on */
    sso_cfg.u64 = cvmx_read_csr(CVMX_SSO_CFG);
    sso_cfg.s.rwen = 1;
    sso_cfg.s.dwb = cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB);
    sso_cfg.s.rwq_byp_dis = 0;
    sso_cfg.s.rwio_byp_dis = 0;
    cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);

    return 0;
}

/**
 * Undo cvmx_helper_initialize_sso(): drain the SSO re-write queues and
 * disable RWI/RWO. The RWQ buffers themselves are deliberately not
 * returned to the allocator (see the "Leak the memory" notes below).
 *
 * @return Zero on success (always zero; no-op on non-CN68XX models)
 */
int cvmx_helper_uninitialize_sso(void)
{
    cvmx_fpa_quex_available_t queue_available;
    cvmx_sso_cfg_t sso_cfg;
    cvmx_sso_rwq_pop_fptr_t pop_fptr;
    cvmx_sso_rwq_psh_fptr_t fptr;
    cvmx_sso_fpage_cnt_t fpage_cnt;
    int num_to_transfer, i;
    char *mem;

    if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
        return 0;

    sso_cfg.u64 = cvmx_read_csr(CVMX_SSO_CFG);
    sso_cfg.s.rwen = 0;
    sso_cfg.s.rwq_byp_dis = 1;
    cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);
    /* Read back to make sure the write has completed before continuing */
    cvmx_read_csr(CVMX_SSO_CFG);
    queue_available.u64 = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(8));

    /* Make CVMX_FPA_QUEX_AVAILABLE(8) % 16 == 0: pop buffers off the free
       list and push them right back until the count is 16-aligned */
    for (num_to_transfer = (16 - queue_available.s.que_siz) % 16;
         num_to_transfer > 0; num_to_transfer--) {
        do {
            pop_fptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_POP_FPTR);
        } while (!pop_fptr.s.val);
        for (;;) {
            fptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_PSH_FPTR);
            if (!fptr.s.full)
                break;
            cvmx_wait(1000);
        }
        fptr.s.fptr = pop_fptr.s.fptr;
        cvmx_write_csr(CVMX_SSO_RWQ_PSH_FPTR, fptr.u64);
    }
    cvmx_read_csr(CVMX_SSO_CFG);

    /* Wait for the hardware to reflect the 16-aligned count */
    do {
        queue_available.u64 = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(8));
    } while (queue_available.s.que_siz % 16);

    sso_cfg.s.rwen = 1;
    sso_cfg.s.rwq_byp_dis = 0;
    cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);

    for (i = 0; i < 8; i++) {
        cvmx_sso_rwq_head_ptrx_t head_ptr;
        cvmx_sso_rwq_tail_ptrx_t tail_ptr;

        head_ptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_HEAD_PTRX(i));
        tail_ptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_TAIL_PTRX(i));
        /* head != tail indicates entries were left in the ring */
        if (head_ptr.s.ptr != tail_ptr.s.ptr) {
            cvmx_dprintf("head_ptr.s.ptr != tail_ptr.s.ptr, idx: %d\n", i);
        }

        mem = cvmx_phys_to_ptr(((uint64_t)head_ptr.s.ptr) << 7);
        /* Leak the memory */
    }

    /* Drain the free list until the free-page count reaches zero */
    do {
        do {
            pop_fptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_POP_FPTR);
            if (pop_fptr.s.val) {
                mem = cvmx_phys_to_ptr(((uint64_t)pop_fptr.s.fptr) << 7);
                /* Leak the memory */
            }
        } while (pop_fptr.s.val);
        fpage_cnt.u64 = cvmx_read_csr(CVMX_SSO_FPAGE_CNT);
    } while (fpage_cnt.s.fpage_cnt);

    sso_cfg.s.rwen = 0;
    sso_cfg.s.rwq_byp_dis = 0;
    cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);

    return 0;
}

/**
 * Initialize the PIP, IPD, and PKO hardware to support
 * simple priority based queues for the ethernet ports. Each
 * port is configured with a number of priority queues based
 * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
 * priority than the previous.
 *
 * @return Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_global(void)
{
    int result = 0;
    int interface;
    cvmx_l2c_cfg_t l2c_cfg;
    cvmx_smix_en_t smix_en;
    const int num_interfaces = cvmx_helper_get_number_of_interfaces();

    /* CN52XX pass 1: Due to a bug in 2nd order CDR, it needs to be disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
        __cvmx_helper_errata_qlm_disable_2nd_order_cdr(1);

    /* Tell L2 to give the IOB statically higher priority compared to the
       cores. This avoids conditions where IO blocks might be starved under
       very high L2 loads */
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
    {
        /* Newer models use L2C_CTL for the arbitration mode */
        cvmx_l2c_ctl_t l2c_ctl;
        l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
        l2c_ctl.s.rsp_arb_mode = 1;
        l2c_ctl.s.xmc_arb_mode = 0;
        cvmx_write_csr(CVMX_L2C_CTL, l2c_ctl.u64);
    }
    else
    {
        l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
        l2c_cfg.s.lrf_arb_mode = 0;
        l2c_cfg.s.rfb_arb_mode = 0;
        cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64);
    }

    if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
    {
        int smi_inf = 1;
        int i;

        /* Newer chips have more than one SMI/MDIO interface */
        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
            smi_inf = 4;
        else if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)
                 && !OCTEON_IS_MODEL(OCTEON_CN58XX)
                 && !OCTEON_IS_MODEL(OCTEON_CN50XX))
            smi_inf = 2;

        for (i = 0; i < smi_inf; i++)
        {
            /* Make sure SMI/MDIO is enabled so we can query PHYs */
            smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(i));
            if (!smix_en.s.en)
            {
                smix_en.s.en = 1;
                cvmx_write_csr(CVMX_SMIX_EN(i), smix_en.u64);
            }
        }
    }

    /* Initialize the port configuration database before probing */
    __cvmx_helper_cfg_init();

    for (interface=0; interface<num_interfaces; interface++)
        result |= cvmx_helper_interface_probe(interface);

    cvmx_pko_initialize_global();
    for (interface=0; interface<num_interfaces; interface++)
    {
        /* Note: only the dprintf is conditional here; IPD setup runs for
           every interface regardless of its port count */
        if (cvmx_helper_ports_on_interface(interface) > 0)
            cvmx_dprintf("Interface %d has %d ports (%s)\n",
                         interface, cvmx_helper_ports_on_interface(interface),
                         cvmx_helper_interface_mode_to_string(cvmx_helper_interface_get_mode(interface)));
        result |= __cvmx_helper_interface_setup_ipd(interface);
        if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
            result |= __cvmx_helper_interface_setup_pko(interface);
    }

    result |= __cvmx_helper_global_setup_ipd();
    result |= __cvmx_helper_global_setup_pko();

    /* Enable any flow control and backpressure */
    result |= __cvmx_helper_global_setup_backpressure();

#if CVMX_HELPER_ENABLE_IPD
    result |= cvmx_helper_ipd_and_packet_input_enable();
#endif
    return result;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_helper_initialize_packet_io_global);
#endif


/**
 * Does core local initialization for packet io
 *
 * @return Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_local(void)
{
    return cvmx_pko_initialize_local();
}

/**
 * wait for the pko queue to drain
 *
 * @param queue a valid pko queue
 * @return count is the length of the queue after calling this
 * function
 */
static int cvmx_helper_wait_pko_queue_drain(int queue)
{
    const int timeout = 5; /* Wait up to 5 seconds for timeouts */
    int count;
    uint64_t start_cycle, stop_cycle;

    count = cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(queue));
    start_cycle = cvmx_get_cycle();
    stop_cycle = start_cycle + cvmx_clock_get_rate(CVMX_CLOCK_CORE) * timeout;
    /* Poll until the command queue empties or the cycle budget expires;
       a non-zero return therefore means the queue failed to drain */
    while (count && (cvmx_get_cycle() < stop_cycle))
    {
        cvmx_wait(10000);
        count = cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(queue));
    }

    return count;
}

struct
cvmx_buffer_list {
    struct cvmx_buffer_list *next;   /* singly linked list of raw FPA buffers */
};

/**
 * Undo the initialization performed in
 * cvmx_helper_initialize_packet_io_global(). After calling this routine and the
 * local version on each core, packet IO for Octeon will be disabled and placed
 * in the initial reset state. It will then be safe to call the initialize
 * later on. Note that this routine does not empty the FPA pools. It frees all
 * buffers used by the packet IO hardware to the FPA so a function emptying the
 * FPA after shutdown should find all packet buffers in the FPA.
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_helper_shutdown_packet_io_global(void)
{
    const int timeout = 5; /* Wait up to 5 seconds for timeouts */
    int result = 0;
    int num_interfaces;
    int interface;
    int num_ports;
    int index;
    struct cvmx_buffer_list *pool0_buffers;
    struct cvmx_buffer_list *pool0_buffers_tail;
    cvmx_wqe_t *work;

    /* Step 1: Disable all backpressure */
    for (interface=0; interface<CVMX_HELPER_MAX_GMX; interface++)
        if (cvmx_helper_interface_get_mode(interface) != CVMX_HELPER_INTERFACE_MODE_DISABLED)
            cvmx_gmx_set_backpressure_override(interface, 0xf);

step2:
    /* Step 2: Wait for the PKO queues to drain. The step 4b errata
       workaround below can jump back here after injecting packets. */
    if (octeon_has_feature(OCTEON_FEATURE_PKND))
    {
        /* PKND chips (CN68XX): iterate over the flat queue range */
        int queue, max_queue;

        max_queue = __cvmx_helper_cfg_pko_max_queue();
        for (queue = 0; queue < max_queue; queue++)
        {
            if (cvmx_helper_wait_pko_queue_drain(queue))
            {
                result = -1;
                goto step3;
            }
        }
    }
    else
    {
        /* Legacy chips: walk every port of every interface and drain
           each port's queue range */
        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface=0; interface<num_interfaces; interface++)
        {
            num_ports = cvmx_helper_ports_on_interface(interface);
            for (index=0; index<num_ports; index++)
            {
                int pko_port = cvmx_helper_get_ipd_port(interface, index);
                int queue = cvmx_pko_get_base_queue(pko_port);
                int max_queue = queue + cvmx_pko_get_num_queues(pko_port);
                while (queue < max_queue)
                {
                    if (cvmx_helper_wait_pko_queue_drain(queue))
                    {
                        result = -1;
                        goto step3;
                    }
                    queue++;
                }
            }
        }
    }

step3:
    /* Step 3: Disable TX and RX on all ports */
    for (interface=0; interface<CVMX_HELPER_MAX_GMX; interface++)
    {
        switch (cvmx_helper_interface_get_mode(interface))
        {
            case CVMX_HELPER_INTERFACE_MODE_DISABLED:
            case CVMX_HELPER_INTERFACE_MODE_PCIE:
                /* Not a packet interface */
                break;
            case CVMX_HELPER_INTERFACE_MODE_NPI:
            case CVMX_HELPER_INTERFACE_MODE_SRIO:
            case CVMX_HELPER_INTERFACE_MODE_ILK:
                /* We don't handle the NPI/NPEI/SRIO packet engines. The caller
                   must know these are idle */
                break;
            case CVMX_HELPER_INTERFACE_MODE_LOOP:
                /* Nothing needed. Once PKO is idle, the loopback devices
                   must be idle */
                break;
            case CVMX_HELPER_INTERFACE_MODE_SPI:
                /* SPI cannot be disabled from Octeon. It is the responsibility
                   of the caller to make sure SPI is idle before doing
                   shutdown */
                /* Fall through and do the same processing as RGMII/GMII */
            case CVMX_HELPER_INTERFACE_MODE_GMII:
            case CVMX_HELPER_INTERFACE_MODE_RGMII:
                /* Disable outermost RX at the ASX block */
                cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), 0);
                num_ports = cvmx_helper_ports_on_interface(interface);
                if (num_ports > 4)
                    num_ports = 4;
                for (index=0; index<num_ports; index++)
                {
                    cvmx_gmxx_prtx_cfg_t gmx_cfg;
                    gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                    gmx_cfg.s.en = 0;
                    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
                    /* Poll the GMX state machine waiting for it to become idle.
                       NOTE(review): the debug-select encoding below is
                       model-specific -- confirm against the chip's debug bus
                       documentation before changing. */
                    cvmx_write_csr(CVMX_NPI_DBG_SELECT, interface*0x800 + index*0x100 + 0x880);
                    if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data&7, ==, 0, timeout*1000000))
                    {
                        cvmx_dprintf("GMX RX path timeout waiting for idle\n");
                        result = -1;
                    }
                    if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data&0xf, ==, 0, timeout*1000000))
                    {
                        cvmx_dprintf("GMX TX path timeout waiting for idle\n");
                        result = -1;
                    }
                }
                /* Disable outermost TX at the ASX block */
                cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), 0);
                /* Disable interrupts for interface */
                cvmx_write_csr(CVMX_ASXX_INT_EN(interface), 0);
                cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0);
                break;
            case CVMX_HELPER_INTERFACE_MODE_XAUI:
            case CVMX_HELPER_INTERFACE_MODE_RXAUI:
            case CVMX_HELPER_INTERFACE_MODE_SGMII:
            case CVMX_HELPER_INTERFACE_MODE_PICMG:
                num_ports = cvmx_helper_ports_on_interface(interface);
                if (num_ports > 4)
                    num_ports = 4;
                for (index=0; index<num_ports; index++)
                {
                    /* These modes expose rx_idle/tx_idle directly in
                       GMX_PRT_CFG, so no debug bus access is needed */
                    cvmx_gmxx_prtx_cfg_t gmx_cfg;
                    gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                    gmx_cfg.s.en = 0;
                    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
                    if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface), cvmx_gmxx_prtx_cfg_t, rx_idle, ==, 1, timeout*1000000))
                    {
                        cvmx_dprintf("GMX RX path timeout waiting for idle\n");
                        result = -1;
                    }
                    if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface), cvmx_gmxx_prtx_cfg_t, tx_idle, ==, 1, timeout*1000000))
                    {
                        cvmx_dprintf("GMX TX path timeout waiting for idle\n");
                        result = -1;
                    }
                }
                break;
        }
    }

    /* Step 4: Retrieve all packets from the POW and free them */
    while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT)))
    {
        cvmx_helper_free_packet_data(work);
        cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 0);
    }

    /* Step 4b: Special workaround for pass 2 errata */
    if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
    {
        cvmx_ipd_ptr_count_t ipd_cnt;
        int to_add;
        ipd_cnt.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
        /* Number of packets needed to bring the counters to a multiple of 8 */
        to_add = (ipd_cnt.s.wqev_cnt + ipd_cnt.s.wqe_pcnt) & 0x7;
        if (to_add)
        {
            int port = -1;
            cvmx_dprintf("Aligning CN38XX pass 2 IPD counters\n");
            /* Pick the first RGMII port available for the loopback trick */
            if (cvmx_helper_interface_get_mode(0) == CVMX_HELPER_INTERFACE_MODE_RGMII)
                port = 0;
            else if (cvmx_helper_interface_get_mode(1) == CVMX_HELPER_INTERFACE_MODE_RGMII)
                port = 16;

            if (port != -1)
            {
                char *buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
                if (buffer)
                {
                    int queue = cvmx_pko_get_base_queue(port);
                    cvmx_pko_command_word0_t pko_command;
                    cvmx_buf_ptr_t packet;
                    uint64_t start_cycle;
                    uint64_t stop_cycle;

                    /* Populate a minimal packet: broadcast MAC, 60 bytes */
                    memset(buffer, 0xff, 6);
                    memset(buffer+6, 0, 54);
                    pko_command.u64 = 0;
                    pko_command.s.dontfree = 1;
                    pko_command.s.total_bytes = 60;
                    pko_command.s.segs = 1;
                    packet.u64 = 0;
                    packet.s.addr = cvmx_ptr_to_phys(buffer);
                    packet.s.size = CVMX_FPA_PACKET_POOL_SIZE;

                    /* Loop the port internally and send 'to_add' packets */
                    __cvmx_helper_rgmii_configure_loopback(port, 1, 0);
                    while (to_add--)
                    {
                        cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);
                        if (cvmx_pko_send_packet_finish(port, queue, pko_command, packet, CVMX_PKO_LOCK_CMD_QUEUE))
                        {
                            cvmx_dprintf("ERROR: Unable to align IPD counters (PKO failed)\n");
                            break;
                        }
                    }
                    cvmx_fpa_free(buffer, CVMX_FPA_PACKET_POOL, 0);

                    /* Wait for the packets to loop back */
                    start_cycle = cvmx_get_cycle();
                    stop_cycle = start_cycle + cvmx_clock_get_rate(CVMX_CLOCK_CORE) * timeout;
                    while (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(queue)) &&
                           (cvmx_get_cycle() < stop_cycle))
                    {
                        cvmx_wait(1000);
                    }
                    cvmx_wait(1000);
                    __cvmx_helper_rgmii_configure_loopback(port, 0, 0);
                    /* to_add == -1 means every packet was sent; redo the
                       drain/flush starting at step 2 */
                    if (to_add == -1)
                        goto step2;
                }
                else
                    cvmx_dprintf("ERROR: Unable to align IPD counters (Packet pool empty)\n");
            }
            else
                cvmx_dprintf("ERROR: Unable to align IPD counters\n");
        }
    }

    /* Step 5 */
    cvmx_ipd_disable();

    /* Step 6: Drain all prefetched buffers from IPD/PIP. Note that IPD/PIP
       have not been reset yet */
    __cvmx_ipd_free_ptr();

    /* Step 7: Free the PKO command buffers and put PKO in reset */
    cvmx_pko_shutdown();

    /* Step 8: Disable MAC address filtering */
    for (interface=0; interface<CVMX_HELPER_MAX_GMX; interface++)
    {
        switch (cvmx_helper_interface_get_mode(interface))
        {
            case CVMX_HELPER_INTERFACE_MODE_DISABLED:
            case CVMX_HELPER_INTERFACE_MODE_PCIE:
            case CVMX_HELPER_INTERFACE_MODE_SRIO:
            case CVMX_HELPER_INTERFACE_MODE_ILK:
            case CVMX_HELPER_INTERFACE_MODE_NPI:
            case CVMX_HELPER_INTERFACE_MODE_LOOP:
                break;
            case CVMX_HELPER_INTERFACE_MODE_XAUI:
            case CVMX_HELPER_INTERFACE_MODE_RXAUI:
            case CVMX_HELPER_INTERFACE_MODE_GMII:
            case CVMX_HELPER_INTERFACE_MODE_RGMII:
            case CVMX_HELPER_INTERFACE_MODE_SPI:
            case CVMX_HELPER_INTERFACE_MODE_SGMII:
            case CVMX_HELPER_INTERFACE_MODE_PICMG:
                num_ports = cvmx_helper_ports_on_interface(interface);
                if (num_ports > 4)
                    num_ports = 4;
                for (index=0; index<num_ports; index++)
                {
                    /* ADR_CTL=1 restores the reset-default filter mode and
                       the CAM entries are cleared */
                    cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface), 1);
                    cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface), 0);
                    cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), 0);
                    cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), 0);
                    cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), 0);
                    cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), 0);
                    cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), 0);
                    cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), 0);
                }
                break;
        }
    }

    /* Step 9: Drain all FPA buffers out of pool 0 before we reset
     * IPD/PIP. This is needed to keep IPD_QUE0_FREE_PAGE_CNT in
     * sync. We temporarily keep the buffers in the pool0_buffers
     * list.
     */
    pool0_buffers = NULL;
    pool0_buffers_tail = NULL;
    while (1)
    {
        struct cvmx_buffer_list *buffer = cvmx_fpa_alloc(0);
        if (buffer) {
            buffer->next = NULL;

            if (pool0_buffers == NULL)
                pool0_buffers = buffer;
            else
                pool0_buffers_tail->next = buffer;

            pool0_buffers_tail = buffer;
        }
        else
            break;
    }

    /* Step 10: Reset IPD and PIP */
    {
        cvmx_ipd_ctl_status_t ipd_ctl_status;
        ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
        ipd_ctl_status.s.reset = 1;
        cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);

        if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
            (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
        {
            /* only try 1000 times. Normally if this works it will happen in
            ** the first 50 loops. */
            int max_loops = 1000;
            int loop = 0;
            /* Per port backpressure counters can get misaligned after an
               IPD reset. This code realigns them by performing repeated
               resets. See IPD-13473 */
            cvmx_wait(100);
            if (__cvmx_helper_backpressure_is_misaligned())
            {
                cvmx_dprintf("Starting to align per port backpressure counters.\n");
                while (__cvmx_helper_backpressure_is_misaligned() && (loop++ < max_loops))
                {
                    cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
                    cvmx_wait(123);
                }
                if (loop < max_loops)
                    cvmx_dprintf("Completed aligning per port backpressure counters (%d loops).\n", loop);
                else
                {
                    cvmx_dprintf("ERROR: unable to align per port backpressure counters.\n");
                    /* For now, don't hang.... */
                }
            }
        }

        /* PIP_SFT_RST not present in CN38XXp{1,2} */
        if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
        {
            cvmx_pip_sft_rst_t pip_sft_rst;
            pip_sft_rst.u64 = cvmx_read_csr(CVMX_PIP_SFT_RST);
            pip_sft_rst.s.rst = 1;
            cvmx_write_csr(CVMX_PIP_SFT_RST, pip_sft_rst.u64);
        }

        /* Make sure IPD has finished reset. */
        if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
        {
            if (CVMX_WAIT_FOR_FIELD64(CVMX_IPD_CTL_STATUS, cvmx_ipd_ctl_status_t, rst_done, ==, 0, 1000))
            {
                cvmx_dprintf("IPD reset timeout waiting for idle\n");
                result = -1;
            }
        }
    }

    /* Step 11: Restore the FPA buffers into pool 0 */
    while (pool0_buffers) {
        struct cvmx_buffer_list *n = pool0_buffers->next;
        cvmx_fpa_free(pool0_buffers, 0, 0);
        pool0_buffers = n;
    }

    /* Step 12: Release interface structures */
    __cvmx_helper_shutdown_interfaces();

    return result;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_helper_shutdown_packet_io_global);
#endif


/**
 * Does core local shutdown of packet io
 *
 * @return Zero on success, non-zero on failure
 */
int cvmx_helper_shutdown_packet_io_local(void)
{
    /* Currently there is nothing to do per core. This may change in
       the future */
    return 0;
}



/**
 * Auto configure an IPD/PKO port link state and speed.
This 1769 * function basically does the equivalent of: 1770 * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port)); 1771 * 1772 * @param ipd_port IPD/PKO port to auto configure 1773 * 1774 * @return Link state after configure 1775 */ 1776cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port) 1777{ 1778 cvmx_helper_link_info_t link_info; 1779 int interface = cvmx_helper_get_interface_num(ipd_port); 1780 int index = cvmx_helper_get_interface_index_num(ipd_port); 1781 1782 if (index >= cvmx_helper_ports_on_interface(interface)) 1783 { 1784 link_info.u64 = 0; 1785 return link_info; 1786 } 1787 1788 link_info = cvmx_helper_link_get(ipd_port); 1789 if (link_info.u64 == (__cvmx_helper_get_link_info(interface, index)).u64) 1790 return link_info; 1791 1792#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL) 1793 if (!link_info.s.link_up) 1794 cvmx_error_disable_group(CVMX_ERROR_GROUP_ETHERNET, ipd_port); 1795#endif 1796 1797 /* If we fail to set the link speed, port_link_info will not change */ 1798 cvmx_helper_link_set(ipd_port, link_info); 1799 1800#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL) 1801 if (link_info.s.link_up) 1802 cvmx_error_enable_group(CVMX_ERROR_GROUP_ETHERNET, ipd_port); 1803#endif 1804 1805 return link_info; 1806} 1807#ifdef CVMX_BUILD_FOR_LINUX_KERNEL 1808EXPORT_SYMBOL(cvmx_helper_link_autoconf); 1809#endif 1810 1811/** 1812 * Return the link state of an IPD/PKO port as returned by 1813 * auto negotiation. The result of this function may not match 1814 * Octeon's link config if auto negotiation has changed since 1815 * the last call to cvmx_helper_link_set(). 
1816 * 1817 * @param ipd_port IPD/PKO port to query 1818 * 1819 * @return Link state 1820 */ 1821cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port) 1822{ 1823 cvmx_helper_link_info_t result; 1824 int interface = cvmx_helper_get_interface_num(ipd_port); 1825 int index = cvmx_helper_get_interface_index_num(ipd_port); 1826 1827 /* The default result will be a down link unless the code below 1828 changes it */ 1829 result.u64 = 0; 1830 1831 if (index >= cvmx_helper_ports_on_interface(interface)) 1832 return result; 1833 1834 switch (cvmx_helper_interface_get_mode(interface)) 1835 { 1836 case CVMX_HELPER_INTERFACE_MODE_DISABLED: 1837 case CVMX_HELPER_INTERFACE_MODE_PCIE: 1838 /* Network links are not supported */ 1839 break; 1840 case CVMX_HELPER_INTERFACE_MODE_XAUI: 1841 case CVMX_HELPER_INTERFACE_MODE_RXAUI: 1842 result = __cvmx_helper_xaui_link_get(ipd_port); 1843 break; 1844 case CVMX_HELPER_INTERFACE_MODE_GMII: 1845 if (index == 0) 1846 result = __cvmx_helper_rgmii_link_get(ipd_port); 1847 else 1848 { 1849 result.s.full_duplex = 1; 1850 result.s.link_up = 1; 1851 result.s.speed = 1000; 1852 } 1853 break; 1854 case CVMX_HELPER_INTERFACE_MODE_RGMII: 1855 result = __cvmx_helper_rgmii_link_get(ipd_port); 1856 break; 1857 case CVMX_HELPER_INTERFACE_MODE_SPI: 1858 result = __cvmx_helper_spi_link_get(ipd_port); 1859 break; 1860 case CVMX_HELPER_INTERFACE_MODE_SGMII: 1861 case CVMX_HELPER_INTERFACE_MODE_PICMG: 1862 result = __cvmx_helper_sgmii_link_get(ipd_port); 1863 break; 1864 case CVMX_HELPER_INTERFACE_MODE_SRIO: 1865 result = __cvmx_helper_srio_link_get(ipd_port); 1866 break; 1867 case CVMX_HELPER_INTERFACE_MODE_ILK: 1868 result = __cvmx_helper_ilk_link_get(ipd_port); 1869 break; 1870 case CVMX_HELPER_INTERFACE_MODE_NPI: 1871 case CVMX_HELPER_INTERFACE_MODE_LOOP: 1872 /* Network links are not supported */ 1873 break; 1874 } 1875 return result; 1876} 1877#ifdef CVMX_BUILD_FOR_LINUX_KERNEL 1878EXPORT_SYMBOL(cvmx_helper_link_get); 1879#endif 1880 1881 1882/** 
1883 * Configure an IPD/PKO port for the specified link state. This 1884 * function does not influence auto negotiation at the PHY level. 1885 * The passed link state must always match the link state returned 1886 * by cvmx_helper_link_get(). It is normally best to use 1887 * cvmx_helper_link_autoconf() instead. 1888 * 1889 * @param ipd_port IPD/PKO port to configure 1890 * @param link_info The new link state 1891 * 1892 * @return Zero on success, negative on failure 1893 */ 1894int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info) 1895{ 1896 int result = -1; 1897 int interface = cvmx_helper_get_interface_num(ipd_port); 1898 int index = cvmx_helper_get_interface_index_num(ipd_port); 1899 1900 if (index >= cvmx_helper_ports_on_interface(interface)) 1901 return -1; 1902 1903 switch (cvmx_helper_interface_get_mode(interface)) 1904 { 1905 case CVMX_HELPER_INTERFACE_MODE_DISABLED: 1906 case CVMX_HELPER_INTERFACE_MODE_PCIE: 1907 break; 1908 case CVMX_HELPER_INTERFACE_MODE_XAUI: 1909 case CVMX_HELPER_INTERFACE_MODE_RXAUI: 1910 result = __cvmx_helper_xaui_link_set(ipd_port, link_info); 1911 break; 1912 /* RGMII/GMII/MII are all treated about the same. 
Most functions 1913 refer to these ports as RGMII */ 1914 case CVMX_HELPER_INTERFACE_MODE_RGMII: 1915 case CVMX_HELPER_INTERFACE_MODE_GMII: 1916 result = __cvmx_helper_rgmii_link_set(ipd_port, link_info); 1917 break; 1918 case CVMX_HELPER_INTERFACE_MODE_SPI: 1919 result = __cvmx_helper_spi_link_set(ipd_port, link_info); 1920 break; 1921 case CVMX_HELPER_INTERFACE_MODE_SGMII: 1922 case CVMX_HELPER_INTERFACE_MODE_PICMG: 1923 result = __cvmx_helper_sgmii_link_set(ipd_port, link_info); 1924 break; 1925 case CVMX_HELPER_INTERFACE_MODE_SRIO: 1926 result = __cvmx_helper_srio_link_set(ipd_port, link_info); 1927 break; 1928 case CVMX_HELPER_INTERFACE_MODE_ILK: 1929 result = __cvmx_helper_ilk_link_set(ipd_port, link_info); 1930 break; 1931 case CVMX_HELPER_INTERFACE_MODE_NPI: 1932 case CVMX_HELPER_INTERFACE_MODE_LOOP: 1933 break; 1934 } 1935 /* Set the port_link_info here so that the link status is updated 1936 no matter how cvmx_helper_link_set is called. We don't change 1937 the value if link_set failed */ 1938 if (result == 0) 1939 __cvmx_helper_set_link_info(interface, index, link_info); 1940 return result; 1941} 1942#ifdef CVMX_BUILD_FOR_LINUX_KERNEL 1943EXPORT_SYMBOL(cvmx_helper_link_set); 1944#endif 1945 1946 1947/** 1948 * Configure a port for internal and/or external loopback. Internal loopback 1949 * causes packets sent by the port to be received by Octeon. External loopback 1950 * causes packets received from the wire to sent out again. 1951 * 1952 * @param ipd_port IPD/PKO port to loopback. 1953 * @param enable_internal 1954 * Non zero if you want internal loopback 1955 * @param enable_external 1956 * Non zero if you want external loopback 1957 * 1958 * @return Zero on success, negative on failure. 
1959 */ 1960int cvmx_helper_configure_loopback(int ipd_port, int enable_internal, int enable_external) 1961{ 1962 int result = -1; 1963 int interface = cvmx_helper_get_interface_num(ipd_port); 1964 int index = cvmx_helper_get_interface_index_num(ipd_port); 1965 1966 if (index >= cvmx_helper_ports_on_interface(interface)) 1967 return -1; 1968 1969 switch (cvmx_helper_interface_get_mode(interface)) 1970 { 1971 case CVMX_HELPER_INTERFACE_MODE_DISABLED: 1972 case CVMX_HELPER_INTERFACE_MODE_PCIE: 1973 case CVMX_HELPER_INTERFACE_MODE_SRIO: 1974 case CVMX_HELPER_INTERFACE_MODE_ILK: 1975 case CVMX_HELPER_INTERFACE_MODE_SPI: 1976 case CVMX_HELPER_INTERFACE_MODE_NPI: 1977 case CVMX_HELPER_INTERFACE_MODE_LOOP: 1978 break; 1979 case CVMX_HELPER_INTERFACE_MODE_XAUI: 1980 case CVMX_HELPER_INTERFACE_MODE_RXAUI: 1981 result = __cvmx_helper_xaui_configure_loopback(ipd_port, enable_internal, enable_external); 1982 break; 1983 case CVMX_HELPER_INTERFACE_MODE_RGMII: 1984 case CVMX_HELPER_INTERFACE_MODE_GMII: 1985 result = __cvmx_helper_rgmii_configure_loopback(ipd_port, enable_internal, enable_external); 1986 break; 1987 case CVMX_HELPER_INTERFACE_MODE_SGMII: 1988 case CVMX_HELPER_INTERFACE_MODE_PICMG: 1989 result = __cvmx_helper_sgmii_configure_loopback(ipd_port, enable_internal, enable_external); 1990 break; 1991 } 1992 return result; 1993} 1994 1995#endif /* CVMX_ENABLE_PKO_FUNCTIONS */ 1996