/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 *
 */

#ifndef __CVMX_PKO3_H__
#define __CVMX_PKO3_H__

DECLARE_GLOBAL_DATA_PTR;

/*
 * Use a full LMTDMA (with a return word) when
 * CVMX_ENABLE_PARAMETER_CHECKING is enabled.
 */
#undef CVMX_ENABLE_PARAMETER_CHECKING
#define CVMX_ENABLE_PARAMETER_CHECKING 0

/*
 * CVMSEG, scratch line for LMTDMA/LMTST operations:
 * 1. It should differ from other CVMSEG uses, e.g. IOBDMA.
 * 2. It must agree with the setting of the CvmCtl[LMTLINE] control register.
 * The line contains 16 words; words 1-15 are cleared when word 0 is written.
 */
#define CVMX_PKO_LMTLINE 2ull

/* PKO3 queue level identifier */
enum cvmx_pko3_level_e {
	CVMX_PKO_LEVEL_INVAL = 0,
	CVMX_PKO_PORT_QUEUES = 0xd1,
	CVMX_PKO_L2_QUEUES = 0xc2,
	CVMX_PKO_L3_QUEUES = 0xb3,
	CVMX_PKO_L4_QUEUES = 0xa4,
	CVMX_PKO_L5_QUEUES = 0x95,
	CVMX_PKO_DESCR_QUEUES = 0x86,
};

enum cvmx_pko_dqop {
	CVMX_PKO_DQ_SEND = 0ULL,
	CVMX_PKO_DQ_OPEN = 1ULL,
	CVMX_PKO_DQ_CLOSE = 2ULL,
	CVMX_PKO_DQ_QUERY = 3ULL
};

/**
 * Returns the PKO DQ..L2 Shaper Time-Wheel clock rate for the specified node.
 */
static inline u64 cvmx_pko3_dq_tw_clock_rate_node(int node)
{
	return gd->bus_clk / 768;
}

/**
 * Returns the PKO Port Shaper Time-Wheel clock rate for the specified node.
 */
static inline u64 cvmx_pko3_pq_tw_clock_rate_node(int node)
{
	int div;

	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		div = 96;
	else
		div = 48;
	return gd->bus_clk / div;
}

/**
 * @INTERNAL
 * Return the number of MACs in the PKO (excluding the NULL MAC)
 * in a model-dependent manner.
 */
static inline unsigned int __cvmx_pko3_num_macs(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return 10;
	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return 14;
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return 28;
	return 0;
}

/**
 * @INTERNAL
 * Return the deepest scheduler queue level, depending on the SoC model
 */
static inline int __cvmx_pko3_sq_lvl_max(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return CVMX_PKO_L3_QUEUES;
	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return CVMX_PKO_L3_QUEUES;
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return CVMX_PKO_L5_QUEUES;
	return -1;
}

/**
 * @INTERNAL
 * Return the next (lower) queue level for a given level
 */
static inline enum cvmx_pko3_level_e
__cvmx_pko3_sq_lvl_next(enum cvmx_pko3_level_e level)
{
	switch (level) {
	default:
		return CVMX_PKO_LEVEL_INVAL;
	case CVMX_PKO_PORT_QUEUES:
		return CVMX_PKO_L2_QUEUES;
	case CVMX_PKO_L2_QUEUES:
		return CVMX_PKO_L3_QUEUES;
	case CVMX_PKO_L3_QUEUES:
		if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
		    OCTEON_IS_MODEL(OCTEON_CNF75XX))
			return CVMX_PKO_DESCR_QUEUES;
		return CVMX_PKO_L4_QUEUES;
	case CVMX_PKO_L4_QUEUES:
		if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
		    OCTEON_IS_MODEL(OCTEON_CNF75XX))
			return CVMX_PKO_LEVEL_INVAL;
		return CVMX_PKO_L5_QUEUES;
	case CVMX_PKO_L5_QUEUES:
		if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
		    OCTEON_IS_MODEL(OCTEON_CNF75XX))
			return CVMX_PKO_LEVEL_INVAL;
		return CVMX_PKO_DESCR_QUEUES;
	}
}
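/*
 * Example (illustrative sketch, not part of this API): walk the
 * scheduler hierarchy from the port-queue level down to the descriptor
 * queue level on the current model. The debug() output format is an
 * assumption for the sketch.
 *
 *	enum cvmx_pko3_level_e lvl = CVMX_PKO_PORT_QUEUES;
 *
 *	while (lvl != CVMX_PKO_LEVEL_INVAL) {
 *		debug("level %#x\n", lvl);
 *		if (lvl == CVMX_PKO_DESCR_QUEUES)
 *			break;
 *		lvl = __cvmx_pko3_sq_lvl_next(lvl);
 *	}
 */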
/**
 * @INTERNAL
 * Return an SQ identifier string, for debug messages.
 * @param buf is a caller-provided buffer of at least 13 bytes
 * (7-byte prefix + 4 digits + ':' + NUL).
 * @param level is the queue level of the SQ.
 * @param q is the queue number within that level.
 */
static inline char *__cvmx_pko3_sq_str(char *buf, enum cvmx_pko3_level_e level,
				       unsigned int q)
{
	char *p;

	switch (level) {
	default:
		strcpy(buf, "ERR-SQ/");
		break;
	case CVMX_PKO_PORT_QUEUES:
		strcpy(buf, "PQ_L1/");
		break;
	case CVMX_PKO_L2_QUEUES:
		strcpy(buf, "SQ_L2/");
		break;
	case CVMX_PKO_L3_QUEUES:
		strcpy(buf, "SQ_L3/");
		break;
	case CVMX_PKO_L4_QUEUES:
		strcpy(buf, "SQ_L4/");
		break;
	case CVMX_PKO_L5_QUEUES:
		strcpy(buf, "SQ_L5/");
		break;
	case CVMX_PKO_DESCR_QUEUES:
		strcpy(buf, "DQ/");
		break;
	}

	/* Find the end of the prefix */
	for (p = buf; *p; p++)
		;
	/* Append the queue number as four decimal digits and a colon */
	*p++ = '0' + q / 1000;
	q -= (q / 1000) * 1000;
	*p++ = '0' + q / 100;
	q -= (q / 100) * 100;
	*p++ = '0' + q / 10;
	q -= (q / 10) * 10;
	*p++ = '0' + q;
	*p++ = ':';
	*p++ = '\0';
	return buf;
}

union cvmx_pko_query_rtn {
	u64 u64;
	struct {
		u64 dqstatus : 4;
		u64 rsvd_50_59 : 10;
		u64 dqop : 2;
		u64 depth : 48;
	} s;
};

typedef union cvmx_pko_query_rtn cvmx_pko_query_rtn_t;

/* PKO_QUERY_RTN_S[DQSTATUS] - cvmx_pko_query_rtn_t->s.dqstatus */
enum pko_query_dqstatus {
	PKO_DQSTATUS_PASS = 0,	       /* No error */
	PKO_DQSTATUS_BADSTATE = 0x8,   /* queue was not ready to enqueue */
	PKO_DQSTATUS_NOFPABUF = 0x9,   /* FPA out of buffers */
	PKO_DQSTATUS_NOPKOBUF = 0xA,   /* PKO out of buffers */
	PKO_DQSTATUS_FAILRTNPTR = 0xB, /* can't return buffer ptr to FPA */
	PKO_DQSTATUS_ALREADY = 0xC,    /* already created */
	PKO_DQSTATUS_NOTCREATED = 0xD, /* not created */
	PKO_DQSTATUS_NOTEMPTY = 0xE,   /* queue not empty */
	PKO_DQSTATUS_SENDPKTDROP = 0xF /* packet dropped, illegal construct */
};

typedef enum pko_query_dqstatus pko_query_dqstatus_t;

/* Sub-command three-bit codes (SUBDC3) */
#define CVMX_PKO_SENDSUBDC_LINK	  0x0
#define CVMX_PKO_SENDSUBDC_GATHER 0x1
#define CVMX_PKO_SENDSUBDC_JUMP	  0x2
/* Sub-command four-bit codes (SUBDC4) */
#define CVMX_PKO_SENDSUBDC_TSO	0x8
#define CVMX_PKO_SENDSUBDC_FREE 0x9
#define CVMX_PKO_SENDSUBDC_WORK 0xA
#define CVMX_PKO_SENDSUBDC_AURA 0xB
#define CVMX_PKO_SENDSUBDC_MEM	0xC
#define CVMX_PKO_SENDSUBDC_EXT	0xD
#define CVMX_PKO_SENDSUBDC_CRC	0xE
#define CVMX_PKO_SENDSUBDC_IMM	0xF

/**
 * pko buf ptr
 * This layout is valid for the LINK_S, GATHER_S and PKI_BUFLINK_S
 * structures. It can also be used for JUMP_S, with the F-bit represented
 * by the "i" field and the size limited to 8 bits.
 */

union cvmx_pko_buf_ptr {
	u64 u64;
	struct {
		u64 size : 16;
		u64 subdc3 : 3;
		u64 i : 1;
		u64 rsvd_42_43 : 2;
		u64 addr : 42;
	} s;
};

typedef union cvmx_pko_buf_ptr cvmx_pko_buf_ptr_t;

/**
 * pko_auraalg_e
 */
enum pko_auraalg_e {
	AURAALG_NOP = 0x0,    /* aura_cnt = No change */
	AURAALG_SUB = 0x3,    /* aura_cnt -= pko_send_aura_t.offset */
	AURAALG_SUBLEN = 0x7, /* aura_cnt -= pko_send_aura_t.offset +
			       * pko_send_hdr_t.total_bytes
			       */
	AURAALG_SUBMBUF = 0xB /* aura_cnt -= pko_send_aura_t.offset +
			       * mbufs_freed
			       */
};

/**
 * PKO_CKL4ALG_E
 */
enum pko_clk4alg_e {
	CKL4ALG_NONE = 0x0, /* No checksum. */
	CKL4ALG_UDP = 0x1,  /* UDP L4 checksum. */
	CKL4ALG_TCP = 0x2,  /* TCP L4 checksum. */
	CKL4ALG_SCTP = 0x3, /* SCTP L4 checksum. */
};
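/*
 * Example (illustrative sketch): describe one packet segment with a
 * LINK_S/GATHER_S style pointer word. 'buf_phys_addr' and the 256-byte
 * size are placeholders, not part of this API.
 *
 *	cvmx_pko_buf_ptr_t ptr_s;
 *
 *	ptr_s.u64 = 0;
 *	ptr_s.s.subdc3 = CVMX_PKO_SENDSUBDC_GATHER;
 *	ptr_s.s.size = 256;
 *	ptr_s.s.addr = buf_phys_addr;	// physical address (placeholder)
 *	ptr_s.s.i = 0;			// buffer-free behavior left to
 *					// SEND_HDR_S settings
 */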
/**
 * pko_send_aura
 */
union cvmx_pko_send_aura {
	u64 u64;
	struct {
		u64 rsvd_60_63 : 4;
		u64 aura : 12; /* NODE+LAURA */
		u64 subdc4 : 4;
		u64 alg : 4; /* pko_auraalg_e */
		u64 rsvd_08_39 : 32;
		u64 offset : 8;
	} s;
};

typedef union cvmx_pko_send_aura cvmx_pko_send_aura_t;

/**
 * pko_send_tso
 */
union cvmx_pko_send_tso {
	u64 u64;
	struct {
		u64 l2len : 8;
		u64 rsvd_48_55 : 8;
		u64 subdc4 : 4; /* 0x8 */
		u64 rsvd_32_43 : 12;
		u64 sb : 8;
		u64 mss : 16;
		u64 eom : 1;
		u64 fn : 7;
	} s;
};

typedef union cvmx_pko_send_tso cvmx_pko_send_tso_t;

/**
 * pko_send_free
 */
union cvmx_pko_send_free {
	u64 u64;
	struct {
		u64 rsvd_48_63 : 16;
		u64 subdc4 : 4; /* 0x9 */
		u64 rsvd : 2;
		u64 addr : 42;
	} s;
};

typedef union cvmx_pko_send_free cvmx_pko_send_free_t;

/* PKO_SEND_HDR_S - PKO header subcommand */
union cvmx_pko_send_hdr {
	u64 u64;
	struct {
		u64 rsvd_60_63 : 4;
		u64 aura : 12;
		u64 ckl4 : 2; /* PKO_CKL4ALG_E */
		u64 ckl3 : 1;
		u64 ds : 1;
		u64 le : 1;
		u64 n2 : 1;
		u64 ii : 1;
		u64 df : 1;
		u64 rsvd_39 : 1;
		u64 format : 7;
		u64 l4ptr : 8;
		u64 l3ptr : 8;
		u64 total : 16;
	} s;
};

typedef union cvmx_pko_send_hdr cvmx_pko_send_hdr_t;

/* PKO_SEND_EXT_S - extended header subcommand */
union cvmx_pko_send_ext {
	u64 u64;
	struct {
		u64 rsvd_48_63 : 16;
		u64 subdc4 : 4; /* _SENDSUBDC_EXT */
		u64 col : 2;	/* _COLORALG_E */
		u64 ra : 2;	/* _REDALG_E */
		u64 tstmp : 1;
		u64 rsvd_24_38 : 15;
		u64 markptr : 8;
		u64 rsvd_9_15 : 7;
		u64 shapechg : 9;
	} s;
};

typedef union cvmx_pko_send_ext cvmx_pko_send_ext_t;

/* PKO_MEMDSZ_E */
enum cvmx_pko_memdsz_e {
	MEMDSZ_B64 = 0,
	MEMDSZ_B32 = 1,
	MEMDSZ_B16 = 2, /* Not in HRM, assumed unsupported */
	MEMDSZ_B8 = 3
};
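/*
 * Example (illustrative sketch): build a PKO_SEND_HDR_S word for an
 * IPv4/UDP frame, asking hardware to recompute both checksums. The
 * offsets assume an untagged Ethernet header and a 20-byte IP header;
 * 'pkt_bytes' is a placeholder. Aura and buffer-free fields are left
 * at zero for brevity.
 *
 *	cvmx_pko_send_hdr_t hdr_s;
 *
 *	hdr_s.u64 = 0;
 *	hdr_s.s.total = pkt_bytes;	// total L2 frame length
 *	hdr_s.s.l3ptr = 14;		// IP header follows Ethernet
 *	hdr_s.s.l4ptr = 14 + 20;	// UDP header follows the IP header
 *	hdr_s.s.ckl3 = 1;		// recompute the IPv4 header checksum
 *	hdr_s.s.ckl4 = CKL4ALG_UDP;	// recompute the UDP checksum
 */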
/* PKO_MEMALG_E */
enum cvmx_pko_memalg_e {
	MEMALG_SET = 0,	     /* Set mem = PKO_SEND_MEM_S[OFFSET] */
	MEMALG_SETTSTMP = 1, /* Set the memory location to the timestamp.
			      * PKO_SEND_MEM_S[DSZ] must be B64 and a
			      * PKO_SEND_EXT_S subdescriptor must be in
			      * the descriptor with PKO_SEND_EXT_S[TSTMP]=1.
			      */
	MEMALG_SETRSLT = 2,  /* [DSZ] = B64; mem = PKO_MEM_RESULT_S. */
	MEMALG_ADD = 8,	     /* mem = mem + PKO_SEND_MEM_S[OFFSET] */
	MEMALG_SUB = 9,	     /* mem = mem - PKO_SEND_MEM_S[OFFSET] */
	MEMALG_ADDLEN = 0xA, /* mem += [OFFSET] + PKO_SEND_HDR_S[TOTAL] */
	MEMALG_SUBLEN = 0xB, /* mem -= [OFFSET] + PKO_SEND_HDR_S[TOTAL] */
	MEMALG_ADDMBUF = 0xC, /* mem += [OFFSET] + mbufs_freed */
	MEMALG_SUBMBUF = 0xD  /* mem -= [OFFSET] + mbufs_freed */
};

union cvmx_pko_send_mem {
	u64 u64;
	struct {
		u64 rsvd_63 : 1;
		u64 wmem : 1;
		u64 dsz : 2;
		u64 alg : 4;
		u64 offset : 8;
		u64 subdc4 : 4;
		u64 rsvd_42_43 : 2;
		u64 addr : 42;
	} s;
};

typedef union cvmx_pko_send_mem cvmx_pko_send_mem_t;

union cvmx_pko_send_work {
	u64 u64;
	struct {
		u64 rsvd_62_63 : 2;
		u64 grp : 10;
		u64 tt : 2;
		u64 rsvd_48_49 : 2;
		u64 subdc4 : 4;
		u64 rsvd_42_43 : 2;
		u64 addr : 42;
	} s;
};

typedef union cvmx_pko_send_work cvmx_pko_send_work_t;

/*** PKO_SEND_DMA_S - format of the IOBDMA/LMTDMA data word ***/
union cvmx_pko_lmtdma_data {
	u64 u64;
	struct {
		u64 scraddr : 8;
		u64 rtnlen : 8;
		u64 did : 8; /* 0x51 */
		u64 node : 4;
		u64 rsvd_34_35 : 2;
		u64 dqop : 2; /* PKO_DQOP_E */
		u64 rsvd_26_31 : 6;
		u64 dq : 10;
		u64 rsvd_0_15 : 16;
	} s;
};

typedef union cvmx_pko_lmtdma_data cvmx_pko_lmtdma_data_t;

typedef struct cvmx_pko3_dq_params_s {
	s32 depth;
	s32 limit;
	u64 pad[15];
} cvmx_pko3_dq_params_t;

/* DQ depth cached value */
extern cvmx_pko3_dq_params_t *__cvmx_pko3_dq_params[CVMX_MAX_NODES];

int cvmx_pko3_internal_buffer_count(unsigned int node);

/**
 * @INTERNAL
 * Locate the cached parameters of a descriptor queue
 * @param node is the node on which the DQ resides
 * @param dq is the descriptor queue number
 */
static inline cvmx_pko3_dq_params_t *cvmx_pko3_dq_parameters(unsigned int node,
							     unsigned int dq)
{
	cvmx_pko3_dq_params_t *pparam = NULL;
	static cvmx_pko3_dq_params_t dummy;

	dummy.depth = 0;
	dummy.limit = (1 << 16);

	if (cvmx_likely(node < CVMX_MAX_NODES))
		pparam = __cvmx_pko3_dq_params[node];

	if (cvmx_likely(pparam))
		pparam += dq;
	else
		pparam = &dummy;

	return pparam;
}

static inline void cvmx_pko3_dq_set_limit(unsigned int node, unsigned int dq,
					  unsigned int limit)
{
	cvmx_pko3_dq_params_t *pparam;

	pparam = cvmx_pko3_dq_parameters(node, dq);
	pparam->limit = limit;
}

/**
 * PKO descriptor queue operation error string
 *
 * @param dqstatus is the enumeration returned from hardware,
 *	  PKO_QUERY_RTN_S[DQSTATUS].
 *
 * @return static constant string error description
 */
const char *pko_dqstatus_error(pko_query_dqstatus_t dqstatus);
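/*
 * Example (illustrative sketch): cap the cached depth limit of a DQ so
 * software can throttle before hardware rejects a SEND. Node 0, DQ 8
 * and the 512-entry limit are placeholders, not recommendations.
 *
 *	cvmx_pko3_dq_params_t *pparam;
 *
 *	cvmx_pko3_dq_set_limit(0, 8, 512);
 *	pparam = cvmx_pko3_dq_parameters(0, 8);
 *	if (pparam->depth > pparam->limit)
 *		; // back off; see the depth handling in __cvmx_pko3_lmtdma()
 */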
/*
 * This function gets the PKO MAC number for an interface/port.
 *
 * @param xiface is the global interface number.
 * @param index is the port index on the interface.
 * @return the MAC number if successful, or -1 on failure.
 */
static inline int __cvmx_pko3_get_mac_num(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	cvmx_helper_interface_mode_t mode;
	int interface_index;
	int ilk_mac_base = -1, bgx_mac_base = -1, bgx_ports = 4;

	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		bgx_mac_base = 2;

	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		bgx_mac_base = 2;

	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		ilk_mac_base = 2;
		bgx_mac_base = 4;
	}

	mode = cvmx_helper_interface_get_mode(xiface);
	switch (mode) {
	case CVMX_HELPER_INTERFACE_MODE_LOOP:
		return 0;
	case CVMX_HELPER_INTERFACE_MODE_NPI:
		return 1;
	case CVMX_HELPER_INTERFACE_MODE_ILK:
		if (ilk_mac_base < 0)
			return -1;
		interface_index = (xi.interface - CVMX_ILK_GBL_BASE());
		if (interface_index < 0)
			return -1;
		return (ilk_mac_base + interface_index);
	case CVMX_HELPER_INTERFACE_MODE_SRIO:
		return (4 + 2 * xi.interface + index);
	default:
		if (xi.interface >= CVMX_ILK_GBL_BASE() && ilk_mac_base >= 0)
			return -1;
		/* All other modes belong to BGX */
		return (bgx_mac_base + bgx_ports * xi.interface + index);
	}
}

/**
 * @INTERNAL
 *
 * Get the scratch offset for the LMTDMA/LMTST data buffer
 *
 */
static inline unsigned int cvmx_pko3_lmtdma_scr_base(void)
{
	return CVMX_PKO_LMTLINE * CVMX_CACHE_LINE_SIZE;
}

/**
 * @INTERNAL
 *
 * Get the address of the LMTDMA/LMTST data buffer
 *
 */
static inline u64 *cvmx_pko3_cvmseg_addr(void)
{
	const unsigned int scr = cvmx_pko3_lmtdma_scr_base();

	return (u64 *)(CVMX_SCRATCH_BASE + scr);
}

/**
 * Save the scratchpad area
 * @param buf storage buffer for saving previous scratchpad contents.
 *
 * This function should be used whenever the cache line designated for
 * LMTST/LMTDMA and Wide-Atomic operations can be shared by two contexts
 * where one may preempt the other, e.g. a hard interrupt in the Linux
 * kernel preempting a user-space application that uses the same
 * scratchpad on the same processor core.
 * 'cvmx_lmtline_save()' should be called upon entry into the
 * potentially interrupting context, and 'cvmx_lmtline_restore()' should
 * be called prior to exiting that context.
 */
static inline void cvmx_lmtline_save(u64 buf[16])
{
	unsigned int i, scr_off = cvmx_pko3_lmtdma_scr_base();
	unsigned int sz = CVMX_CACHE_LINE_SIZE / sizeof(u64);

	/* Wait for any pending LMTDMA to finish */
	CVMX_SYNCIOBDMA;

	/* Copy LMTLINE to the user-provided buffer */
	for (i = 0; i < sz; i++)
		buf[i] = cvmx_scratch_read64(scr_off + i * sizeof(u64));
}

/**
 * Restore the scratchpad area
 * @param buf storage buffer containing the previous scratchpad contents.
 */
static inline void cvmx_lmtline_restore(const u64 buf[16])
{
	unsigned int i, scr_off = cvmx_pko3_lmtdma_scr_base();
	unsigned int sz = CVMX_CACHE_LINE_SIZE / sizeof(u64);

	/* Wait for any pending LMTDMA to finish */
	CVMX_SYNCIOBDMA;

	/* Restore the scratchpad area from buf[] */
	for (i = 0; i < sz; i++)
		cvmx_scratch_write64(scr_off + i * sizeof(u64), buf[i]);
}
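/*
 * Example (illustrative sketch): bracket work done in a preempting
 * context, per the usage rule above. The interrupt-handler framing is
 * an assumption for the sketch.
 *
 *	u64 lmt_save[16];
 *
 *	cvmx_lmtline_save(lmt_save);
 *	// ... issue LMTDMA/LMTST operations from this context ...
 *	cvmx_lmtline_restore(lmt_save);
 */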
/*
 * @INTERNAL
 * Deliver PKO SEND commands via CVMSEG LM and LMTDMA/LMTST.
 * The command must already be stored at the CVMSEG address.
 *
 * @param node is the destination node
 * @param dq is the destination descriptor queue.
 * @param numwords is the number of outgoing words
 * @param tag_wait waits for a pending tag switch to complete just
 * before issuing the LMTDMA
 * @return the PKO3 native query result structure.
 *
 * <numwords> must be between 1 and 15 for the CVMX_PKO_DQ_SEND command.
 *
 * NOTE: Internal use only.
 */
static inline cvmx_pko_query_rtn_t
__cvmx_pko3_lmtdma(u8 node, uint16_t dq, unsigned int numwords, bool tag_wait)
{
	const enum cvmx_pko_dqop dqop = CVMX_PKO_DQ_SEND;
	cvmx_pko_query_rtn_t pko_status;
	cvmx_pko_lmtdma_data_t pko_send_dma_data;
	u64 dma_addr;
	unsigned int scr_base = cvmx_pko3_lmtdma_scr_base();
	unsigned int scr_off;
	cvmx_pko3_dq_params_t *pparam;

	if (cvmx_unlikely(numwords < 1 || numwords > 15)) {
		debug("%s: ERROR: invalid command word count\n", __func__);
		pko_status.u64 = ~0ull;
		return pko_status;
	}

	pparam = cvmx_pko3_dq_parameters(node, dq);

	pko_status.u64 = 0;
	pko_send_dma_data.u64 = 0;

	/* LMTDMA address offset is (nWords - 1) */
	dma_addr = CVMX_LMTDMA_ORDERED_IO_ADDR;
	dma_addr += (numwords - 1) << 3;

	scr_off = scr_base + numwords * sizeof(u64);

	/* Write all-ones into the return area */
	cvmx_scratch_write64(scr_off, ~0ull);

	/* Barrier: make sure all prior writes complete before the following */
	CVMX_SYNCWS;

	/* If the cached depth exceeds the limit, check the real depth */
	if (cvmx_unlikely(pparam->depth > pparam->limit)) {
		cvmx_pko_dqx_wm_cnt_t wm_cnt;

		wm_cnt.u64 = csr_rd_node(node, CVMX_PKO_DQX_WM_CNT(dq));
		pko_status.s.depth = wm_cnt.s.count;
		pparam->depth = pko_status.s.depth;

		if (pparam->depth > pparam->limit) {
			pko_status.s.dqop = dqop;
			pko_status.s.dqstatus = PKO_DQSTATUS_NOFPABUF;
			return pko_status;
		}
	} else {
		cvmx_atomic_add32_nosync(&pparam->depth, 1);
	}

	if (CVMX_ENABLE_PARAMETER_CHECKING) {
		/* Request one return word */
		pko_send_dma_data.s.rtnlen = 1;
	} else {
		/* Do not expect a return word */
		pko_send_dma_data.s.rtnlen = 0;
	}

	/* Build the store data for the DMA */
	pko_send_dma_data.s.scraddr = scr_off >> 3;
	pko_send_dma_data.s.did = 0x51;
	pko_send_dma_data.s.node = node;
	pko_send_dma_data.s.dqop = dqop;
	pko_send_dma_data.s.dq = dq;

	/* Wait for a pending tag switch to finish just before the LMTDMA */
	if (tag_wait)
		cvmx_pow_tag_sw_wait();

	/* Issue the PKO DMA */
	cvmx_write64_uint64(dma_addr, pko_send_dma_data.u64);

	if (cvmx_unlikely(pko_send_dma_data.s.rtnlen)) {
		/* Wait for LMTDMA completion */
		CVMX_SYNCIOBDMA;

		/* Retrieve the real result */
		pko_status.u64 = cvmx_scratch_read64(scr_off);
		pparam->depth = pko_status.s.depth;
	} else {
		/* Fake a positive result */
		pko_status.s.dqop = dqop;
		pko_status.s.dqstatus = PKO_DQSTATUS_PASS;
	}

	return pko_status;
}
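/*
 * Example (illustrative sketch): issue a prepared 3-word SEND command
 * and report any failure. 'node', 'dq' and the CVMSEG contents are
 * assumed to have been set up by the caller. Note that with
 * CVMX_ENABLE_PARAMETER_CHECKING at 0, a PASS result is synthesized
 * locally rather than read back from hardware.
 *
 *	cvmx_pko_query_rtn_t rtn;
 *
 *	rtn = __cvmx_pko3_lmtdma(node, dq, 3, false);
 *	if (rtn.s.dqstatus != PKO_DQSTATUS_PASS)
 *		debug("PKO send failed: %s\n",
 *		      pko_dqstatus_error(rtn.s.dqstatus));
 */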
/*
 * @INTERNAL
 * Send PKO descriptor commands via CVMSEG LM and LMTDMA.
 * @param node is the destination node
 * @param dq is the destination descriptor queue.
 * @param cmds[] is an array of 64-bit PKO3 headers/subheaders
 * @param numwords is the number of outgoing words
 * @param dqop is the operation code
 * @return the PKO3 native query result structure.
 *
 * <numwords> must be between 1 and 15 for the CVMX_PKO_DQ_SEND command,
 * otherwise it must be 0.
 *
 * NOTE: Internal use only.
 */
static inline cvmx_pko_query_rtn_t __cvmx_pko3_do_dma(u8 node, uint16_t dq,
						      u64 cmds[],
						      unsigned int numwords,
						      enum cvmx_pko_dqop dqop)
{
	const unsigned int scr_base = cvmx_pko3_lmtdma_scr_base();
	cvmx_pko_query_rtn_t pko_status;
	cvmx_pko_lmtdma_data_t pko_send_dma_data;
	u64 dma_addr;
	unsigned int i, scr_off;
	cvmx_pko3_dq_params_t *pparam;

	pparam = cvmx_pko3_dq_parameters(node, dq);
	CVMX_PREFETCH0(pparam);
	/* Push the write buffers */
	CVMX_SYNCWS;

	pko_status.u64 = 0;
	pko_send_dma_data.u64 = 0;

	if (cvmx_unlikely(numwords > 15)) {
		debug("%s: ERROR: invalid command word count\n", __func__);
		pko_status.u64 = ~0ull;
		return pko_status;
	}

	/* Store the command words into CVMSEG LM */
	for (i = 0, scr_off = scr_base; i < numwords; i++) {
		cvmx_scratch_write64(scr_off, cmds[i]);
		scr_off += sizeof(cmds[0]);
	}

	/* With no data to send this is an IOBDMA, else an LMTDMA operation */
	if (numwords == 0) {
		dma_addr = CVMX_IOBDMA_ORDERED_IO_ADDR;
	} else {
		/* LMTDMA address offset is (nWords - 1) */
		dma_addr = CVMX_LMTDMA_ORDERED_IO_ADDR;
		dma_addr += (numwords - 1) << 3;
	}

	if (cvmx_likely(dqop == CVMX_PKO_DQ_SEND)) {
		/* If the cached depth exceeds the limit, check the real depth */
		if (cvmx_unlikely(pparam->depth > pparam->limit)) {
			cvmx_pko_dqx_wm_cnt_t wm_cnt;

			wm_cnt.u64 = csr_rd_node(node, CVMX_PKO_DQX_WM_CNT(dq));
			pko_status.s.depth = wm_cnt.s.count;
			pparam->depth = pko_status.s.depth;
		}

		if (cvmx_unlikely(pparam->depth > pparam->limit)) {
			pko_status.s.dqop = dqop;
			pko_status.s.dqstatus = PKO_DQSTATUS_NOFPABUF;
			return pko_status;
		}

		cvmx_atomic_add32_nosync(&pparam->depth, 1);
	}

	if (cvmx_unlikely(dqop != CVMX_PKO_DQ_SEND) ||
	    CVMX_ENABLE_PARAMETER_CHECKING) {
		/* Request one return word */
		pko_send_dma_data.s.rtnlen = 1;
		/* Write all-ones into the return area */
		cvmx_scratch_write64(scr_off, ~0ull);
	} else {
		/* Do not expect a return word */
		pko_send_dma_data.s.rtnlen = 0;
	}

	/* Build the store data for the DMA */
	pko_send_dma_data.s.scraddr = scr_off >> 3;
	pko_send_dma_data.s.did = 0x51;
	pko_send_dma_data.s.node = node;
	pko_send_dma_data.s.dqop = dqop;
	pko_send_dma_data.s.dq = dq;

	/* Barrier: make sure all prior writes complete before the following */
	CVMX_SYNCWS;

	/* Wait for a pending tag switch to finish just before the LMTDMA */
	cvmx_pow_tag_sw_wait();

	/* Issue the PKO DMA */
	cvmx_write64_uint64(dma_addr, pko_send_dma_data.u64);

	if (pko_send_dma_data.s.rtnlen) {
		/* Wait for LMTDMA completion */
		CVMX_SYNCIOBDMA;

		/* Retrieve the real result */
		pko_status.u64 = cvmx_scratch_read64(scr_off);
		pparam->depth = pko_status.s.depth;
	} else {
		/* Fake a positive result */
		pko_status.s.dqop = dqop;
		pko_status.s.dqstatus = PKO_DQSTATUS_PASS;
	}

	return pko_status;
}
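/*
 * Example (illustrative sketch): read the current depth of a DQ with a
 * CVMX_PKO_DQ_QUERY operation, which carries no command words and is
 * therefore issued as an IOBDMA. Error handling is elided; 'node' and
 * 'dq' are assumed valid. The public cvmx_pko3_dq_query() wrapper is
 * normally preferred over calling this internal helper directly.
 *
 *	cvmx_pko_query_rtn_t rtn;
 *
 *	rtn = __cvmx_pko3_do_dma(node, dq, NULL, 0, CVMX_PKO_DQ_QUERY);
 *	debug("DQ %u depth: %llu\n", dq,
 *	      (unsigned long long)rtn.s.depth);
 */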
/*
 * Transmit packets through PKO, simplified API
 *
 * @INTERNAL
 *
 * @param dq is a global destination queue number
 * @param pki_ptr specifies the packet's first linked pointer, as returned
 * from 'cvmx_wqe_get_pki_pkt_ptr()'.
 * @param len is the total number of bytes in the packet.
 * @param gaura is the aura to free packet buffers to after transmit.
 * @param pcounter is the address of a 64-bit counter to atomically
 * decrement when packet transmission is complete.
 * @param ptag is a Flow Tag pointer for packet ordering, or NULL.
 *
 * @return returns 0 if successful and -1 on failure.
 *
 * NOTE: This is a provisional API, and is subject to change.
 */
static inline int cvmx_pko3_xmit_link_buf(int dq, cvmx_buf_ptr_pki_t pki_ptr,
					  unsigned int len, int gaura,
					  u64 *pcounter, u32 *ptag)
{
	cvmx_pko_query_rtn_t pko_status;
	cvmx_pko_send_hdr_t hdr_s;
	cvmx_pko_buf_ptr_t gtr_s;
	unsigned int node, nwords;
	unsigned int scr_base = cvmx_pko3_lmtdma_scr_base();

	/* Separate the global DQ# into node and local DQ */
	node = dq >> 10;
	dq &= (1 << 10) - 1;

	/* Fill in the header */
	hdr_s.u64 = 0;
	hdr_s.s.total = len;
	hdr_s.s.df = (gaura < 0);
	hdr_s.s.ii = 1;
	hdr_s.s.aura = (gaura >= 0) ? gaura : 0;

	/* Fill in the gather pointer */
	gtr_s.u64 = 0;
	gtr_s.s.subdc3 = CVMX_PKO_SENDSUBDC_LINK;
	gtr_s.s.addr = pki_ptr.addr;
	gtr_s.s.size = pki_ptr.size;

	/* Store the command words */
	cvmx_scratch_write64(scr_base + sizeof(u64) * 0, hdr_s.u64);
	cvmx_scratch_write64(scr_base + sizeof(u64) * 1, gtr_s.u64);
	nwords = 2;

	/* Conditionally set up an atomic decrement counter */
	if (pcounter) {
		cvmx_pko_send_mem_t mem_s;

		mem_s.u64 = 0; /* also clears the reserved fields */
		mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
		mem_s.s.dsz = MEMDSZ_B64;
		mem_s.s.alg = MEMALG_SUB;
		mem_s.s.offset = 1;
		mem_s.s.wmem = 0;
		mem_s.s.addr = cvmx_ptr_to_phys(CASTPTR(void, pcounter));
		cvmx_scratch_write64(scr_base + sizeof(u64) * nwords++,
				     mem_s.u64);
	}

	/* To preserve packet order, go atomic with a DQ-specific tag */
	if (ptag)
		cvmx_pow_tag_sw(*ptag ^ dq, CVMX_POW_TAG_TYPE_ATOMIC);

	/* Do the LMTDMA */
	pko_status = __cvmx_pko3_lmtdma(node, dq, nwords, ptag);

	if (cvmx_likely(pko_status.s.dqstatus == PKO_DQSTATUS_PASS))
		return 0;
	else
		return -1;
}

/**
 * @INTERNAL
 *
 * Retrieve the PKO internal AURA from a register.
 */
static inline unsigned int __cvmx_pko3_aura_get(unsigned int node)
{
	static s16 aura = -1;
	cvmx_pko_dpfi_fpa_aura_t pko_aura;

	if (aura >= 0)
		return aura;

	pko_aura.u64 = csr_rd_node(node, CVMX_PKO_DPFI_FPA_AURA);

	aura = (pko_aura.s.node << 10) | pko_aura.s.laura;
	return aura;
}

/** Open a configured descriptor queue before queueing packets into it.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param dq is the descriptor queue number to be opened.
 * @return returns 0 on success or -1 on failure.
 */
int cvmx_pko_dq_open(int node, int dq);

/** Close a descriptor queue
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param dq is the descriptor queue number to be closed.
 * @return returns 0 on success or -1 on failure.
 *
 * This should be called before changing the DQ parent link, topology,
 * or when shutting down the PKO.
 */
int cvmx_pko3_dq_close(int node, int dq);
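/*
 * Example (illustrative sketch): transmit the packet attached to a PKI
 * work queue entry, freeing its buffers back to their original aura.
 * The helpers used to derive the arguments (cvmx_wqe_get_len(),
 * cvmx_wqe_get_aura()) are assumed WQE accessors; 'wqe' and 'dq' are
 * set up elsewhere.
 *
 *	cvmx_buf_ptr_pki_t pkt = cvmx_wqe_get_pki_pkt_ptr(wqe);
 *	int rc;
 *
 *	rc = cvmx_pko3_xmit_link_buf(dq, pkt, cvmx_wqe_get_len(wqe),
 *				     cvmx_wqe_get_aura(wqe), NULL, NULL);
 *	if (rc)
 *		debug("transmit failed\n");
 */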
/** Query a descriptor queue
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param dq is the descriptor queue number to be queried.
 * @return returns the descriptor queue depth on success or -1 on failure.
 */
int cvmx_pko3_dq_query(int node, int dq);

/** Drain a descriptor queue
 *
 * Before closing a DQ, this call will drain all pending traffic
 * on the DQ to the NULL MAC, which will circumvent any traffic
 * shaping and flow control to quickly reclaim all packet buffers.
 */
void cvmx_pko3_dq_drain(int node, int dq);

/*
 * PKO global initialization for 78XX.
 *
 * @param node is the node on which the PKO block is initialized.
 * @param aura is the 12-bit AURA (including node) for PKO internal use.
 * @return 0 on success, -1 on failure.
 */
int cvmx_pko3_hw_init_global(int node, uint16_t aura);

/**
 * Shutdown the entire PKO
 */
int cvmx_pko3_hw_disable(int node);

/* Define the legacy type here to break a circular dependency */
typedef struct cvmx_pko_port_status cvmx_pko_port_status_t;

/**
 * @INTERNAL
 * Backward compatibility for collecting statistics from PKO3
 *
 */
void cvmx_pko3_get_legacy_port_stats(u16 ipd_port, unsigned int clear,
				     cvmx_pko_port_status_t *status);

/** Set MAC options
 *
 * The options supported are the parameters below:
 *
 * @param xiface The physical interface number
 * @param index The physical sub-interface port
 * @param fcs_enable Enable FCS generation
 * @param pad_enable Enable padding to the minimum packet size
 * @param fcs_sop_off Number of bytes at the start of the packet to exclude
 * from the FCS
 *
 * The typical use of `fcs_sop_off` is when the interface is configured
 * to precede every Ethernet packet with a header such as HiGig; such a
 * header is usually not covered by the CRC32 computation, so its size
 * must be set with this parameter.
 *
 * @return Returns 0 on success, -1 if the interface/port is invalid.
 */
int cvmx_pko3_interface_options(int xiface, int index, bool fcs_enable,
				bool pad_enable, unsigned int fcs_sop_off);
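/*
 * Example (illustrative sketch): enable FCS generation and minimum-size
 * padding on the first port of an interface; 'xiface' and the zero
 * fcs_sop_off (no prepended header) are placeholders.
 *
 *	if (cvmx_pko3_interface_options(xiface, 0, true, true, 0) < 0)
 *		debug("invalid interface/port\n");
 */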
/** Set Descriptor Queue options
 *
 * The `min_pad` parameter must be in agreement with the interface-level
 * padding option for all descriptor queues assigned to that particular
 * interface/port.
 */
void cvmx_pko3_dq_options(unsigned int node, unsigned int dq, bool min_pad);

int cvmx_pko3_port_fifo_size(unsigned int xiface, unsigned int index);
int cvmx_pko3_channel_credit_level(int node, enum cvmx_pko3_level_e level);
int cvmx_pko3_port_xoff(unsigned int xiface, unsigned int index);
int cvmx_pko3_port_xon(unsigned int xiface, unsigned int index);

/* Packet descriptor - PKO3 command buffer + internal state */
typedef struct cvmx_pko3_pdesc_s {
	u64 *jump_buf;	/**< jump buffer vaddr */
	s16 last_aura;	/**< AURA of the latest LINK_S/GATHER_S */
	unsigned num_words : 5,	/**< valid words in word array 2..16 */
		headroom : 10,	/**< free bytes at start of 1st buf */
		hdr_offsets : 1, pki_word4_present : 1;
	/* PKO3 command buffer: */
	cvmx_pko_send_hdr_t *hdr_s;
	u64 word[16];	/**< header and subcommands buffer */
	/* Bookkeeping fields: */
	u64 send_work_s; /**< SEND_WORK_S must be the very last subdc */
	s16 jb_aura;	/**< AURA where the jump buffer belongs */
	u16 mem_s_ix;	/**< index of first MEM_S subcommand */
	u8 ckl4_alg;	/**< L3/L4 alg to use if recalc is needed */
	/* Fields saved from WQE for later inspection */
	cvmx_pki_wqe_word4_t pki_word4;
	cvmx_pki_wqe_word2_t pki_word2;
} cvmx_pko3_pdesc_t;

void cvmx_pko3_pdesc_init(cvmx_pko3_pdesc_t *pdesc);
int cvmx_pko3_pdesc_from_wqe(cvmx_pko3_pdesc_t *pdesc, cvmx_wqe_78xx_t *wqe,
			     bool free_bufs);
int cvmx_pko3_pdesc_transmit(cvmx_pko3_pdesc_t *pdesc, uint16_t dq,
			     u32 *flow_tag);
int cvmx_pko3_pdesc_notify_decrement(cvmx_pko3_pdesc_t *pdesc,
				     volatile u64 *p_counter);
int cvmx_pko3_pdesc_notify_wqe(cvmx_pko3_pdesc_t *pdesc, cvmx_wqe_78xx_t *wqe,
			       u8 node, uint8_t group, uint8_t tt, u32 tag);
int cvmx_pko3_pdesc_buf_append(cvmx_pko3_pdesc_t *pdesc, void *p_data,
			       unsigned int data_bytes, unsigned int gaura);
int cvmx_pko3_pdesc_append_free(cvmx_pko3_pdesc_t *pdesc, u64 addr,
				unsigned int gaura);
int cvmx_pko3_pdesc_hdr_push(cvmx_pko3_pdesc_t *pdesc, const void *p_data,
			     u8 data_bytes, uint8_t layer);
int cvmx_pko3_pdesc_hdr_pop(cvmx_pko3_pdesc_t *pdesc, void *hdr_buf,
			    unsigned int num_bytes);
int cvmx_pko3_pdesc_hdr_peek(cvmx_pko3_pdesc_t *pdesc, void *hdr_buf,
			     unsigned int num_bytes, unsigned int offset);
void cvmx_pko3_pdesc_set_free(cvmx_pko3_pdesc_t *pdesc, bool free_bufs);
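/*
 * Example (illustrative sketch): forward a received packet using the
 * packet-descriptor API above. 'wqe' is a received work entry and 'dq'
 * a destination DQ number, both assumed set up elsewhere;
 * cvmx_wqe_get_tag() is assumed to be the usual WQE tag accessor.
 * Error handling is elided.
 *
 *	cvmx_pko3_pdesc_t pdesc;
 *	u32 tag = cvmx_wqe_get_tag(wqe);
 *
 *	cvmx_pko3_pdesc_init(&pdesc);
 *	cvmx_pko3_pdesc_from_wqe(&pdesc, wqe, true);
 *	cvmx_pko3_pdesc_transmit(&pdesc, dq, &tag);
 */

#endif /* __CVMX_PKO3_H__ */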