// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Marvell International Ltd.
 */

#include <config.h>
#include <net.h>
#include <netdev.h>
#include <malloc.h>
#include <miiphy.h>
#include <dm.h>
#include <misc.h>
#include <pci.h>
#include <pci_ids.h>
#include <asm/io.h>
#include <linux/delay.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"

unsigned long rounddown_pow_of_two(unsigned long n)
{
	n |= n >> 1;
	n |= n >> 2;
	n |= n >> 4;
	n |= n >> 8;
	n |= n >> 16;
	n |= n >> 32;

	/* All bits below the most significant set bit are now set;
	 * clear them to round n down to a power of two.
	 */
	return n - (n >> 1);
}
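/* Usage sketch (illustrative, not part of the original driver):
 *
 *	rounddown_pow_of_two(5)  == 4
 *	rounddown_pow_of_two(64) == 64
 *
 * The only caller below feeds it rss_ind_tbl_size, which is expected to
 * already be a power of two, so the call acts as a safety clamp.
 */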
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg);
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
			       struct sq_cfg_msg *sq);
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf);
static int nic_rcv_queue_sw_sync(struct nicpf *nic);

/* Register read/write APIs */
static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
{
	writeq(val, nic->reg_base + offset);
}

static u64 nic_reg_read(struct nicpf *nic, u64 offset)
{
	return readq(nic->reg_base + offset);
}

static u64 nic_get_mbx_addr(int vf)
{
	return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
}

static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	void __iomem *mbx_addr = (void *)(nic->reg_base + nic_get_mbx_addr(vf));
	u64 *msg = (u64 *)mbx;

	/* In first revision HW, the mbox interrupt is triggered when
	 * PF writes to MBOX(1); in later revisions, when PF writes
	 * to MBOX(0).
	 */
	if (pass1_silicon(nic->rev_id, nic->hw->model_id)) {
		writeq(msg[0], mbx_addr);
		writeq(msg[1], mbx_addr + 8);
	} else {
		writeq(msg[1], mbx_addr + 8);
		writeq(msg[0], mbx_addr);
	}
}
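/* Illustrative note (not from the original source): union nic_mbx is two
 * u64 words wide, and the word whose write raises the VF's mailbox
 * interrupt must land last so the VF never observes a half-written
 * message, hence the revision-dependent write order above.  A typical
 * send, as used throughout this file:
 *
 *	union nic_mbx mbx = {};
 *
 *	mbx.msg.msg = NIC_MBOX_MSG_ACK;
 *	nic_send_msg_to_vf(nic, vf, &mbx);
 */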
static void nic_mbx_send_ready(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	int bgx_idx, lmac, timeout = 5, link = -1;
	const u8 *mac;

	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
	mbx.nic_cfg.vf_id = vf;

	if (nic->flags & NIC_TNS_ENABLED)
		mbx.nic_cfg.tns_mode = NIC_TNS_MODE;
	else
		mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	if (vf < nic->num_vf_en) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac)
			memcpy((u8 *)&mbx.nic_cfg.mac_addr, mac, 6);

		while (timeout-- && (link <= 0)) {
			link = bgx_poll_for_link(nic->node, bgx_idx, lmac);
			debug("Link status: %d\n", link);
			if (link <= 0)
				mdelay(2000);
		}
	}
#ifdef VNIC_MULTI_QSET_SUPPORT
	mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
#endif
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;

	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* ACKs VF's mailbox message
 * @vf: VF to which ACK is to be sent
 */
static void nic_mbx_send_ack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_ACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* NACKs a VF's mailbox message to signal that the PF was not able to
 * complete the requested action
 * @vf: VF to which NACK is to be sent
 */
static void nic_mbx_send_nack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_NACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
	int bgx_idx, lmac_idx;

	/* VF IDs are 0-based, so num_vf_en itself is out of range */
	if (lbk->vf_id >= nic->num_vf_en)
		return -1;

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

	return 0;
}

/* Interrupt handler to handle mailbox messages from VFs */
void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	u64 reg_addr;
	u64 cfg;
	int bgx, lmac;
	int i;
	int ret = 0;

	nic->mbx_lock[vf] = true;

	mbx_addr = nic_get_mbx_addr(vf);
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nic_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	debug("%s: Mailbox msg %d from VF%d\n", __func__, mbx.msg.msg, vf);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic_mbx_send_ready(nic, vf);
		if (vf < nic->num_vf_en) {
			nic->link[vf] = 0;
			nic->duplex[vf] = 0;
			nic->speed[vf] = 0;
		}
		ret = 1;
		break;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
			   (mbx.qs.num << NIC_QS_ID_SHIFT);
		cfg = mbx.qs.cfg;
#ifdef VNIC_MULTI_QSET_SUPPORT
		/* Check if its a secondary Qset */
		if (vf >= nic->num_vf_en) {
			cfg = cfg & (~0x7FULL);
			/* Assign this Qset to primary Qset's VF */
			cfg |= nic->pqs_vf[vf];
		}
#endif
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		/* Enable CQE_RX2_S extension in CQE_RX descriptor.
		 * This is appended by default on 81xx/83xx chips; for
		 * consistency, enable the same on 88xx pass2, where the
		 * extension was introduced.
		 */
		if (pass2_silicon(nic->rev_id, nic->hw->model_id))
			nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
		break;
	case NIC_MBOX_MSG_RQ_BP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_SW_SYNC:
		ret = nic_rcv_queue_sw_sync(nic);
		break;
	case NIC_MBOX_MSG_RQ_DROP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_SQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num,
				   (struct sq_cfg_msg *)&mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
#ifdef VNIC_MULTI_QSET_SUPPORT
		if (vf >= nic->num_vf_en)
			break;
#endif
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
		break;
	case NIC_MBOX_MSG_SET_MAX_FRS:
		ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
					mbx.frs.vf_id);
		break;
	case NIC_MBOX_MSG_CPI_CFG:
		nic_config_cpi(nic, &mbx.cpi_cfg);
		break;
#ifdef VNIC_RSS_SUPPORT
	case NIC_MBOX_MSG_RSS_SIZE:
		nic_send_rss_size(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_RSS_CFG:
	case NIC_MBOX_MSG_RSS_CFG_CONT:
		nic_config_rss(nic, &mbx.rss_cfg);
		break;
#endif
	case NIC_MBOX_MSG_CFG_DONE:
		/* Last message of VF config msg sequence */
		nic->vf_enabled[vf] = true;
		if (vf >= nic->lmac_cnt)
			goto unlock;

		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true);
		goto unlock;
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		nic->vf_enabled[vf] = false;
#ifdef VNIC_MULTI_QSET_SUPPORT
		if (vf >= nic->num_vf_en)
			nic->sqs_used[vf - nic->num_vf_en] = false;
		nic->pqs_vf[vf] = 0;
#endif
		if (vf >= nic->lmac_cnt)
			break;

		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false);
		break;
#ifdef VNIC_MULTI_QSET_SUPPORT
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic_alloc_sqs(nic, &mbx.sqs_alloc);
		goto unlock;
	case NIC_MBOX_MSG_NICVF_PTR:
		nic->nicvf[vf] = mbx.nicvf.nicvf;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		nic_send_pnicvf(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_SNICVF_PTR:
		nic_send_snicvf(nic, &mbx.nicvf);
		goto unlock;
#endif
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	default:
		printf("Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
		break;
	}

	if (!ret)
		nic_mbx_send_ack(nic, vf);
	else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
		nic_mbx_send_nack(nic, vf);
unlock:
	nic->mbx_lock[vf] = false;
}

static int nic_rcv_queue_sw_sync(struct nicpf *nic)
{
	int timeout = 20;

	/* Request the HW to sync/flush the receive queues */
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
	while (timeout) {
		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
			break;
		udelay(2000);
		timeout--;
	}
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
	if (!timeout) {
		printf("Receive queue software sync failed\n");
		return 1;
	}
	return 0;
}

static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
	u64 *pkind = (u64 *)&nic->pkind;

	if (new_frs > NIC_HW_MAX_FRS || new_frs < NIC_HW_MIN_FRS) {
		printf("Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
		       vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
		return 1;
	}
	new_frs += ETH_HLEN;
	if (new_frs <= nic->pkind.maxlen)
		return 0;

	nic->pkind.maxlen = new_frs;

	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *pkind);
	return 0;
}
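/* Worked example (illustrative; assumes the usual OcteonTX limits of
 * NIC_HW_MIN_FRS = 64 and NIC_HW_MAX_FRS = 9190): a VF requesting
 * max_frs = 1500 passes the range check, is padded by ETH_HLEN (14) to
 * 1514, and pkind.maxlen is only rewritten if 1514 exceeds the value
 * already programmed into NIC_PF_PKIND_0_15_CFG.
 */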
/* Set minimum transmit packet size */
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac;
	u64 lmac_cfg;
	struct hw_info *hw = nic->hw;
	int max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;

	/* The field can encode up to 60; cap the pad size at 52 so it
	 * stays below the minimum MAC+IP+TCP header length and BGX pads
	 * short packets up to a full 64-byte frame.
	 */
	if (size > 52)
		size = 52;

	/* CN81XX has RGX configured as a fake BGX; adjust max_lmac
	 * accordingly.
	 */
	if (hw->chans_per_rgx)
		max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;

	for (lmac = 0; lmac < max_lmac; lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		lmac_cfg &= ~(0xF << 2);
		lmac_cfg |= ((size / 4) << 2);
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}
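/* Worked example (illustrative): size = 52 is encoded as 52 / 4 = 13 in
 * the 4-bit field at bits [5:2] of NIC_PF_LMAC_0_7_CFG; the
 * read-modify-write above clears the old field and ORs in (13 << 2).
 */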
/* Function to check number of LMACs present and set VF to LMAC mapping.
 * Mapping will be used while initializing channels.
 */
static void nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	int bgx, bgx_count, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	u64 lmac_credit;

	nic->num_vf_en = 0;
	if (nic->flags & NIC_TNS_ENABLED) {
		nic->num_vf_en = DEFAULT_NUM_VF_ENABLED;
		return;
	}

	bgx_get_count(nic->node, &bgx_count);
	debug("bgx_count: %d\n", bgx_count);

	for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
		if (!(bgx_count & (1 << bgx)))
			continue;
		nic->bgx_cnt++;
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		debug("lmac_cnt: %d for BGX%d\n", lmac_cnt, bgx);
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
				NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1ull << 1);	/* channel credit enable */
		lmac_credit |= (0x1ff << 2);	/* max outstanding pkt count */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
				NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
			nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
				      lmac_credit);
	}
}
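/* Worked example (illustrative; assumes NIC_HW_MAX_FRS = 9190 as in the
 * Linux thunder driver): a BGX with 4 LMACs splits 48 KB of FIFO evenly,
 * so each LMAC is granted
 *
 *	((48 * 1024) / 4 - 9190) / 16 = (12288 - 9190) / 16 = 193
 *
 * 16-byte credit units in the field at bit 12 of NIC_PF_LMAC_0_7_CREDIT,
 * i.e. the space left after reserving room for one maximum-sized frame.
 */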
static void nic_get_hw_info(struct nicpf *nic)
{
	u16 sdevid;
	struct hw_info *hw = nic->hw;

	dm_pci_read_config16(nic->udev, PCI_SUBSYSTEM_ID, &sdevid);

	switch (sdevid) {
	case PCI_SUBSYS_DEVID_88XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_NODE;
		hw->chans_per_lmac = 16;
		hw->chans_per_bgx = 128;
		hw->cpi_cnt = 2048;
		hw->rssi_cnt = 4096;
		hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
		hw->tl3_cnt = 256;
		hw->tl2_cnt = 64;
		hw->tl1_cnt = 2;
		hw->tl1_per_bgx = true;
		hw->model_id = 0x88;
		break;
	case PCI_SUBSYS_DEVID_81XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_NODE;
		hw->chans_per_lmac = 8;
		hw->chans_per_bgx = 32;
		hw->chans_per_rgx = 8;
		hw->chans_per_lbk = 24;
		hw->cpi_cnt = 512;
		hw->rssi_cnt = 256;
		hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
		hw->tl3_cnt = 64;
		hw->tl2_cnt = 16;
		hw->tl1_cnt = 10;
		hw->tl1_per_bgx = false;
		hw->model_id = 0x81;
		break;
	case PCI_SUBSYS_DEVID_83XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_NODE;
		hw->chans_per_lmac = 8;
		hw->chans_per_bgx = 32;
		hw->chans_per_lbk = 64;
		hw->cpi_cnt = 2048;
		hw->rssi_cnt = 1024;
		hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
		hw->tl3_cnt = 256;
		hw->tl2_cnt = 64;
		hw->tl1_cnt = 18;
		hw->tl1_per_bgx = false;
		hw->model_id = 0x83;
		break;
	}

	hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->udev);
}

static void nic_init_hw(struct nicpf *nic)
{
	int i;
	u64 reg;
	u64 *pkind = (u64 *)&nic->pkind;

	/* Get HW capability info */
	nic_get_hw_info(nic);

	/* Enable NIC HW block */
	nic_reg_write(nic, NIC_PF_CFG, 0x3);

	/* Enable backpressure */
	nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG, (1ULL << 63) | 0x08);
	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		      (1ULL << 63) | 0x09);

	for (i = 0; i < NIC_MAX_CHANS; i++)
		nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (i << 3), 1);

	if (nic->flags & NIC_TNS_ENABLED) {
		reg = NIC_TNS_MODE << 7;
		reg |= 0x06;
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, reg);
		reg &= ~0xFull;
		reg |= 0x07;
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), reg);
	} else {
		/* Disable TNS mode on both interfaces */
		reg = NIC_TNS_BYPASS_MODE << 7;
		reg |= 0x08; /* Block identifier */
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, reg);
		reg &= ~0xFull;
		reg |= 0x09;
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), reg);
	}

	/* PKIND configuration */
	nic->pkind.minlen = 0;
	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
	nic->pkind.lenerr_en = 1;
	nic->pkind.rx_hdr = 0;
	nic->pkind.hdr_sl = 0;

	for (i = 0; i < NIC_MAX_PKIND; i++)
		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3), *pkind);

	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
}

/* Channel parse index configuration */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	struct hw_info *hw = nic->hw;
	u32 vnic, bgx, lmac, chan;
	u32 padd, cpi_count = 0;
	u64 cpi_base, cpi, rssi_base, rssi;
	u8 qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
	cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
	rssi_base = vnic * hw->rss_ind_tbl_size;

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
		      (1ull << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
		      ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));

	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6 bits of DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count); cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8; /* 3 bits CS out of 6 bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic->rev_id, nic->hw->model_id)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (padd << 16) |
				      (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (rssi_base + rssi));
		}

		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}
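/* Worked example (illustrative; assumes NIC_MAX_CPI_PER_LMAC = 64): for
 * vnic = 1 with CPI_ALG_VLAN, cpi_base = 64 and cpi_count = 8, so the
 * three VLAN PCP bits select one of CPIs 64..71, each of which points
 * back into the RSS indirection window starting at
 * rssi_base = 1 * rss_ind_tbl_size, spreading flows across the VF's
 * receive queues.
 */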
/* Transmit channel configuration (TL4 -> TL3 -> Chan)
 * VNIC0-SQ0 -> TL4(0)   -> TL4A(0)   -> TL3[0]   -> BGX0/LMAC0/Chan0
 * VNIC1-SQ0 -> TL4(8)   -> TL4A(2)   -> TL3[2]   -> BGX0/LMAC1/Chan0
 * VNIC2-SQ0 -> TL4(16)  -> TL4A(4)   -> TL3[4]   -> BGX0/LMAC2/Chan0
 * VNIC3-SQ0 -> TL4(24)  -> TL4A(6)   -> TL3[6]   -> BGX0/LMAC3/Chan0
 * VNIC4-SQ0 -> TL4(512) -> TL4A(128) -> TL3[128] -> BGX1/LMAC0/Chan0
 * VNIC5-SQ0 -> TL4(520) -> TL4A(130) -> TL3[130] -> BGX1/LMAC1/Chan0
 * VNIC6-SQ0 -> TL4(528) -> TL4A(132) -> TL3[132] -> BGX1/LMAC2/Chan0
 * VNIC7-SQ0 -> TL4(536) -> TL4A(134) -> TL3[134] -> BGX1/LMAC3/Chan0
 */
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
			       struct sq_cfg_msg *sq)
{
	struct hw_info *hw = nic->hw;
	u32 bgx, lmac, chan;
	u32 tl2, tl3, tl4;
	u32 rr_quantum;
	u8 sq_idx = sq->sq_num;
	u8 pqs_vnic = vnic;
	int svf;
	u16 sdevid;

	dm_pci_read_config16(nic->udev, PCI_SUBSYSTEM_ID, &sdevid);

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	/* On 88xx, TL4s 0-511 transmit via BGX0 and
	 * TL4s 512-1023 transmit via BGX1.
	 */
	if (hw->tl1_per_bgx) {
		tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
		if (!sq->sqs_mode) {
			tl4 += (lmac * MAX_QUEUES_PER_QSET);
		} else {
			for (svf = 0; svf < MAX_SQS_PER_VF_SINGLE_NODE; svf++) {
				if (nic->vf_sqs[pqs_vnic][svf] == vnic)
					break;
			}
			tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
			tl4 += (lmac * MAX_QUEUES_PER_QSET *
				MAX_SQS_PER_VF_SINGLE_NODE);
			tl4 += (svf * MAX_QUEUES_PER_QSET);
		}
	} else {
		tl4 = (vnic * MAX_QUEUES_PER_QSET);
	}

	tl4 += sq_idx;

	tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
		      ((u64)vnic << NIC_QS_ID_SHIFT) |
		      ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
		      ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);

	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);

	/* On 88xx, channels 0-127 are for BGX0 and
	 * channels 128-255 for BGX1.
	 *
	 * On 81xx/83xx, TL3_CHAN must be configured with the channel
	 * within the LMAC, i.e. 0-7, and not the absolute channel
	 * number used on 88xx.
	 */
	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
	if (hw->tl1_per_bgx)
		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
	else
		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);

	/* Enable backpressure on the channel */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

	tl2 = tl3 >> 2;
	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
	/* No priorities as of now */
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);

	/* Unlike 88xx, where TL2s 0-31 transmit to TL1 '0' and the rest
	 * to TL1 '1', on 81xx/83xx TL2 needs to be configured to transmit
	 * to one of the possible LMACs.
	 *
	 * This register doesn't exist on 88xx.
	 */
	if (!hw->tl1_per_bgx)
		nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
			      lmac + (bgx * MAX_LMAC_PER_BGX));
}
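/* Worked example (illustrative; assumes tl4_cnt = 1024, i.e. 8 queues x
 * 128 VFs, and bgx_cnt = 2 on 88xx): a primary Qset on BGX1/LMAC2 gets
 *
 *	tl4 = 1 * (1024 / 2) + 2 * 8 + sq_idx = 528 + sq_idx
 *
 * matching the VNIC6-SQ0 -> TL4(528) row in the table above; with
 * tl3_cnt = 256, TL3 = 528 / (1024 / 256) = 132, again matching TL3[132].
 */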
int nic_initialize(struct udevice *dev)
{
	struct nicpf *nic = dev_get_priv(dev);

	nic->udev = dev;
	nic->hw = calloc(1, sizeof(struct hw_info));
	if (!nic->hw)
		return -ENOMEM;

	/* MAP PF's configuration registers */
	nic->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
				       PCI_REGION_TYPE, PCI_REGION_MEM);
	if (!nic->reg_base) {
		printf("Cannot map config register space, aborting\n");
		goto exit;
	}

	nic->node = node_id(nic->reg_base);
	dm_pci_read_config8(dev, PCI_REVISION_ID, &nic->rev_id);

	/* By default set NIC in TNS bypass mode */
	nic->flags &= ~NIC_TNS_ENABLED;

	/* Initialize hardware */
	nic_init_hw(nic);

	nic_set_lmac_vf_mapping(nic);

	/* Set RSS TBL size for each VF */
	nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
	nic->rss_ind_tbl_size = rounddown_pow_of_two(nic->rss_ind_tbl_size);

	return 0;
exit:
	free(nic->hw);
	return -ENODEV;
}

int octeontx_nic_probe(struct udevice *dev)
{
	int ret = 0;
	struct nicpf *nicpf = dev_get_priv(dev);

	nicpf->udev = dev;
	ret = nic_initialize(dev);
	if (ret < 0) {
		printf("couldn't initialize NIC PF\n");
		return ret;
	}

	ret = pci_sriov_init(dev, nicpf->num_vf_en);
	if (ret < 0)
		printf("enabling SRIOV failed for num VFs %d\n",
		       nicpf->num_vf_en);

	return ret;
}

U_BOOT_DRIVER(octeontx_nic) = {
	.name	= "octeontx_nic",
	.id	= UCLASS_MISC,
	.probe	= octeontx_nic_probe,
	.priv_auto	= sizeof(struct nicpf),
};

static struct pci_device_id octeontx_nic_supported[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NIC) },
	{}
};

U_BOOT_PCI_DEVICE(octeontx_nic, octeontx_nic_supported);