// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2019-2021 Broadcom.
 */

#include <common.h>

#include <asm/io.h>
#include <dm.h>
#include <linux/delay.h>
#include <memalign.h>
#include <net.h>

#include "bnxt.h"
#include "bnxt_dbg.h"

#define bnxt_down_chip(bp)	bnxt_hwrm_run(down_chip, bp, 0)
#define bnxt_bring_chip(bp)	bnxt_hwrm_run(bring_chip, bp, 1)

/* Broadcom ethernet driver PCI APIs. */
static void bnxt_bring_pci(struct bnxt *bp)
{
	u16 cmd_reg = 0;

	dm_pci_read_config16(bp->pdev, PCI_VENDOR_ID, &bp->vendor_id);
	dm_pci_read_config16(bp->pdev, PCI_DEVICE_ID, &bp->device_id);
	dm_pci_read_config16(bp->pdev,
			     PCI_SUBSYSTEM_VENDOR_ID,
			     &bp->subsystem_vendor);
	dm_pci_read_config16(bp->pdev, PCI_SUBSYSTEM_ID, &bp->subsystem_device);
	dm_pci_read_config16(bp->pdev, PCI_COMMAND, &bp->cmd_reg);
	dm_pci_read_config8(bp->pdev, PCI_INTERRUPT_LINE, &bp->irq);
	bp->bar0 = dm_pci_map_bar(bp->pdev, PCI_BASE_ADDRESS_0, 0, 0,
				  PCI_REGION_TYPE, PCI_REGION_MEM);
	bp->bar1 = dm_pci_map_bar(bp->pdev, PCI_BASE_ADDRESS_2, 0, 0,
				  PCI_REGION_TYPE, PCI_REGION_MEM);
	bp->bar2 = dm_pci_map_bar(bp->pdev, PCI_BASE_ADDRESS_4, 0, 0,
				  PCI_REGION_TYPE, PCI_REGION_MEM);
	cmd_reg = bp->cmd_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
	cmd_reg |= PCI_COMMAND_INTX_DISABLE; /* disable intr */
	dm_pci_write_config16(bp->pdev, PCI_COMMAND, cmd_reg);
	dm_pci_read_config16(bp->pdev, PCI_COMMAND, &cmd_reg);
	dbg_pci(bp, __func__, cmd_reg);
}

int bnxt_free_rx_iob(struct bnxt *bp)
{
	unsigned int i;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_RX_IOB)))
		return STATUS_SUCCESS;

	for (i = 0; i < bp->rx.buf_cnt; i++) {
		if (bp->rx.iob[i]) {
			free(bp->rx.iob[i]);
			bp->rx.iob[i] = NULL;
		}
	}

	FLAG_RESET(bp->flag_hwrm, VALID_RX_IOB);

	return STATUS_SUCCESS;
}

static void set_rx_desc(u8 *buf, void *iob, u16 cons_id, u32 iob_idx)
{
	struct rx_prod_pkt_bd *desc;
	u16 off = cons_id * sizeof(struct rx_prod_pkt_bd);

	desc = (struct rx_prod_pkt_bd *)&buf[off];
	desc->flags_type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
	desc->len = MAX_ETHERNET_PACKET_BUFFER_SIZE;
	desc->opaque = iob_idx;
	desc->dma.addr = virt_to_bus(iob);
}

static int bnxt_alloc_rx_iob(struct bnxt *bp, u16 cons_id, u16 iob_idx)
{
	void *iob;

	iob = memalign(BNXT_DMA_ALIGNMENT, RX_STD_DMA_ALIGNED);
	if (!iob)
		return -ENOMEM;

	dbg_rx_iob(iob, iob_idx, cons_id);
	set_rx_desc((u8 *)bp->rx.bd_virt, iob, cons_id, (u32)iob_idx);
	bp->rx.iob[iob_idx] = iob;

	return 0;
}

void bnxt_mm_init(struct bnxt *bp, const char *func)
{
	memset(bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE);
	memset(bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE);
	memset(bp->cq.bd_virt, 0, CQ_RING_DMA_BUFFER_SIZE);
	memset(bp->tx.bd_virt, 0, TX_RING_DMA_BUFFER_SIZE);
	memset(bp->rx.bd_virt, 0, RX_RING_DMA_BUFFER_SIZE);

	bp->data_addr_mapping = virt_to_bus(bp->hwrm_addr_data);
	bp->req_addr_mapping = virt_to_bus(bp->hwrm_addr_req);
	bp->resp_addr_mapping = virt_to_bus(bp->hwrm_addr_resp);
	bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT;
	bp->link_status = STATUS_LINK_DOWN;
	bp->media_change = 1;
	bp->mtu = MAX_ETHERNET_PACKET_BUFFER_SIZE;
	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	bp->rx.buf_cnt = NUM_RX_BUFFERS;
	bp->rx.ring_cnt = MAX_RX_DESC_CNT;
	bp->tx.ring_cnt = MAX_TX_DESC_CNT;
	bp->cq.ring_cnt = MAX_CQ_DESC_CNT;
	bp->cq.completion_bit = 0x1;
	bp->link_set = LINK_SPEED_DRV_100G;
	dbg_mem(bp, func);
}
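/*
 * DMA memory layout: three HWRM buffers (request, response, data) plus
 * one descriptor ring each for completion, TX and RX.  All of them are
 * allocated with memalign() at BNXT_DMA_ALIGNMENT and handed to the NIC
 * as bus addresses through virt_to_bus().
 */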
void bnxt_free_mem(struct bnxt *bp)
{
	if (bp->cq.bd_virt) {
		free(bp->cq.bd_virt);
		bp->cq.bd_virt = NULL;
	}

	if (bp->rx.bd_virt) {
		free(bp->rx.bd_virt);
		bp->rx.bd_virt = NULL;
	}

	if (bp->tx.bd_virt) {
		free(bp->tx.bd_virt);
		bp->tx.bd_virt = NULL;
	}

	if (bp->hwrm_addr_resp) {
		free(bp->hwrm_addr_resp);
		bp->resp_addr_mapping = 0;
		bp->hwrm_addr_resp = NULL;
	}

	if (bp->hwrm_addr_req) {
		free(bp->hwrm_addr_req);
		bp->req_addr_mapping = 0;
		bp->hwrm_addr_req = NULL;
	}

	if (bp->hwrm_addr_data) {
		free(bp->hwrm_addr_data);
		bp->data_addr_mapping = 0;
		bp->hwrm_addr_data = NULL;
	}

	dbg_mem_free_done(__func__);
}

int bnxt_alloc_mem(struct bnxt *bp)
{
	bp->hwrm_addr_data = memalign(BNXT_DMA_ALIGNMENT, DMA_BUF_SIZE_ALIGNED);
	bp->hwrm_addr_req = memalign(BNXT_DMA_ALIGNMENT, REQ_BUF_SIZE_ALIGNED);
	bp->hwrm_addr_resp = MEM_HWRM_RESP;

	memset(&bp->tx, 0, sizeof(struct lm_tx_info_t));
	memset(&bp->rx, 0, sizeof(struct lm_rx_info_t));
	memset(&bp->cq, 0, sizeof(struct lm_cmp_info_t));

	bp->tx.bd_virt = memalign(BNXT_DMA_ALIGNMENT, TX_RING_DMA_BUFFER_SIZE);
	bp->rx.bd_virt = memalign(BNXT_DMA_ALIGNMENT, RX_RING_DMA_BUFFER_SIZE);
	bp->cq.bd_virt = memalign(BNXT_DMA_ALIGNMENT, CQ_RING_DMA_BUFFER_SIZE);

	if (bp->hwrm_addr_req &&
	    bp->hwrm_addr_resp &&
	    bp->hwrm_addr_data &&
	    bp->tx.bd_virt &&
	    bp->rx.bd_virt &&
	    bp->cq.bd_virt) {
		bnxt_mm_init(bp, __func__);
		return STATUS_SUCCESS;
	}

	dbg_mem_alloc_fail(__func__);
	bnxt_free_mem(bp);

	return -ENOMEM;
}

static void hwrm_init(struct bnxt *bp, struct input *req, u16 cmd, u16 len)
{
	memset(req, 0, len);
	req->req_type = cmd;
	req->cmpl_ring = (u16)HWRM_NA_SIGNATURE;
	req->target_id = (u16)HWRM_NA_SIGNATURE;
	req->resp_addr = bp->resp_addr_mapping;
	req->seq_id = bp->seq_id++;
}

static void hwrm_write_req(struct bnxt *bp, void *req, u32 cnt)
{
	u32 i = 0;

	for (i = 0; i < cnt; i++)
		writel(((u32 *)req)[i], bp->bar0 + GRC_COM_CHAN_BASE + (i * 4));

	writel(0x1, (bp->bar0 + GRC_COM_CHAN_BASE + GRC_COM_CHAN_TRIG));
}

static void short_hwrm_cmd_req(struct bnxt *bp, u16 len)
{
	struct hwrm_short_input sreq;

	memset(&sreq, 0, sizeof(struct hwrm_short_input));
	sreq.req_type = (u16)((struct input *)bp->hwrm_addr_req)->req_type;
	sreq.signature = SHORT_REQ_SIGNATURE_SHORT_CMD;
	sreq.size = len;
	sreq.req_addr = bp->req_addr_mapping;
	dbg_short_cmd((u8 *)&sreq, __func__, sizeof(struct hwrm_short_input));
	hwrm_write_req(bp, &sreq, sizeof(struct hwrm_short_input) / 4);
}

static int wait_resp(struct bnxt *bp, u32 tmo, u16 len, const char *func)
{
	struct input *req = (struct input *)bp->hwrm_addr_req;
	struct output *resp = (struct output *)bp->hwrm_addr_resp;
	u8 *ptr = (u8 *)resp;
	u32 idx;
	u32 wait_cnt = HWRM_CMD_DEFAULT_MULTIPLAYER((u32)tmo);
	u16 resp_len = 0;
	u16 ret = STATUS_TIMEOUT;

	if (len > bp->hwrm_max_req_len)
		short_hwrm_cmd_req(bp, len);
	else
		hwrm_write_req(bp, req, (u32)(len / 4));

	for (idx = 0; idx < wait_cnt; idx++) {
		resp_len = resp->resp_len;
		if (resp->seq_id == req->seq_id &&
		    resp->req_type == req->req_type &&
		    ptr[resp_len - 1] == 1) {
			bp->last_resp_code = resp->error_code;
			ret = resp->error_code;
			break;
		}

		udelay(HWRM_CMD_POLL_WAIT_TIME);
	}

	dbg_hw_cmd(bp, func, len, resp_len, tmo, ret);

	return (int)ret;
}
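/*
 * HWRM command flow: the request is built in bp->hwrm_addr_req and
 * written word-by-word into the GRC communication channel on BAR0 (or,
 * for requests longer than hwrm_max_req_len, wrapped in a "short
 * command" that points firmware at the DMA'd request buffer), then
 * triggered via the channel doorbell.  Firmware DMAs the response into
 * bp->hwrm_addr_resp; wait_resp() polls until the sequence number and
 * request type echo back and the valid byte at the end of the response
 * is set, or the timeout expires.
 */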
static void bnxt_db_cq(struct bnxt *bp)
{
	writel(CQ_DOORBELL_KEY_IDX(bp->cq.cons_idx), bp->bar1);
}

static void bnxt_db_rx(struct bnxt *bp, u32 idx)
{
	writel(RX_DOORBELL_KEY_RX | idx, bp->bar1);
}

static void bnxt_db_tx(struct bnxt *bp, u32 idx)
{
	writel((u32)(TX_DOORBELL_KEY_TX | idx), bp->bar1);
}

int iob_pad(void *packet, int length)
{
	if (length >= ETH_ZLEN)
		return length;

	memset(((u8 *)packet + length), 0x00, (ETH_ZLEN - length));

	return ETH_ZLEN;
}

static inline u32 bnxt_tx_avail(struct bnxt *bp)
{
	barrier();

	return TX_AVAIL(bp->tx.ring_cnt) -
	       ((bp->tx.prod_id - bp->tx.cons_id) &
	       (bp->tx.ring_cnt - 1));
}

void set_txq(struct bnxt *bp, int entry, dma_addr_t mapping, int len)
{
	struct tx_bd_short *prod_bd;

	prod_bd = (struct tx_bd_short *)BD_NOW(bp->tx.bd_virt,
					       entry,
					       sizeof(struct tx_bd_short));
	if (len < 512)
		prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT512;
	else if (len < 1024)
		prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT1K;
	else if (len < 2048)
		prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT2K;
	else
		prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_GTE2K;

	prod_bd->flags_type |= TX_BD_FLAGS;
	prod_bd->dma.addr = mapping;
	prod_bd->len = len;
	prod_bd->opaque = (u32)entry;
	dump_tx_bd(prod_bd, (u16)(sizeof(struct tx_bd_short)));
}

static void bnxt_tx_complete(struct bnxt *bp)
{
	bp->tx.cons_id = NEXT_IDX(bp->tx.cons_id, bp->tx.ring_cnt);
	bp->tx.cnt++;
	dump_tx_stat(bp);
}

int post_rx_buffers(struct bnxt *bp)
{
	u16 cons_id = (bp->rx.cons_idx % bp->rx.ring_cnt);
	u16 iob_idx;

	while (bp->rx.iob_cnt < bp->rx.buf_cnt) {
		iob_idx = (cons_id % bp->rx.buf_cnt);
		if (!bp->rx.iob[iob_idx]) {
			if (bnxt_alloc_rx_iob(bp, cons_id, iob_idx) < 0) {
				dbg_rx_alloc_iob_fail(iob_idx, cons_id);
				break;
			}
		}

		cons_id = NEXT_IDX(cons_id, bp->rx.ring_cnt);
		bp->rx.iob_cnt++;
	}

	if (cons_id != bp->rx.cons_idx) {
		dbg_rx_cid(bp->rx.cons_idx, cons_id);
		bp->rx.cons_idx = cons_id;
		bnxt_db_rx(bp, (u32)cons_id);
	}

	FLAG_SET(bp->flag_hwrm, VALID_RX_IOB);

	return STATUS_SUCCESS;
}

u8 bnxt_rx_drop(struct bnxt *bp, u8 *rx_buf, struct rx_pkt_cmpl_hi *rx_cmp_hi)
{
	u8 chksum_err = 0;
	u8 i;
	u16 error_flags;

	error_flags = (rx_cmp_hi->errors_v2 >>
		       RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT);
	if (rx_cmp_hi->errors_v2 == 0x20 || rx_cmp_hi->errors_v2 == 0x21)
		chksum_err = 1;

	if (error_flags && !chksum_err) {
		bp->rx.err++;
		return 1;
	}

	/* Compare the source MAC (bytes 6-11) against our own address. */
	for (i = 0; i < 6; i++) {
		if (rx_buf[6 + i] != bp->mac_set[i])
			break;
	}

	if (i == 6) {
		bp->rx.dropped++;
		return 2; /* Drop the loopback packets */
	}

	return 0;
}

static void bnxt_adv_cq_index(struct bnxt *bp, u16 count)
{
	u16 cons_idx = bp->cq.cons_idx + count;

	if (cons_idx >= MAX_CQ_DESC_CNT) {
		/* Toggle completion bit when the ring wraps. */
		bp->cq.completion_bit ^= 1;
		cons_idx = cons_idx - MAX_CQ_DESC_CNT;
	}

	bp->cq.cons_idx = cons_idx;
}
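/*
 * Completion ring entries carry a "valid" phase bit instead of a
 * producer index: firmware inverts the bit it writes on every pass
 * through the ring.  The driver keeps its expected phase in
 * bp->cq.completion_bit and flips it on wrap (above), so an entry is
 * owned by the driver exactly when the entry's V bit matches the
 * expected phase.
 */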
void bnxt_adv_rx_index(struct bnxt *bp, u8 *iob, u32 iob_idx)
{
	u16 cons_id = (bp->rx.cons_idx % bp->rx.ring_cnt);

	set_rx_desc((u8 *)bp->rx.bd_virt, (void *)iob, cons_id, iob_idx);
	cons_id = NEXT_IDX(cons_id, bp->rx.ring_cnt);
	if (cons_id != bp->rx.cons_idx) {
		dbg_rx_cid(bp->rx.cons_idx, cons_id);
		bp->rx.cons_idx = cons_id;
		bnxt_db_rx(bp, (u32)cons_id);
	}
}

void rx_process(struct bnxt *bp, struct rx_pkt_cmpl *rx_cmp,
		struct rx_pkt_cmpl_hi *rx_cmp_hi)
{
	u32 desc_idx = rx_cmp->opaque;
	u8 *iob = bp->rx.iob[desc_idx];

	dump_rx_bd(rx_cmp, rx_cmp_hi, desc_idx);
	bp->rx.iob_len = rx_cmp->len;
	bp->rx.iob_rx = iob;
	if (bnxt_rx_drop(bp, iob, rx_cmp_hi))
		bp->rx.iob_recv = PKT_DROPPED;
	else
		bp->rx.iob_recv = PKT_RECEIVED;

	bp->rx.rx_cnt++;

	dbg_rxp(bp->rx.iob_rx, bp->rx.iob_len, bp->rx.iob_recv);
	bnxt_adv_rx_index(bp, iob, desc_idx);
	bnxt_adv_cq_index(bp, 2); /* Rx completion is 2 entries. */
}

static int bnxt_rx_complete(struct bnxt *bp, struct rx_pkt_cmpl *rx_cmp)
{
	struct rx_pkt_cmpl_hi *rx_cmp_hi;
	u8 completion_bit = bp->cq.completion_bit;

	if (bp->cq.cons_idx == (bp->cq.ring_cnt - 1)) {
		rx_cmp_hi = (struct rx_pkt_cmpl_hi *)bp->cq.bd_virt;
		completion_bit ^= 0x1; /* Ring has wrapped. */
	} else {
		rx_cmp_hi = (struct rx_pkt_cmpl_hi *)(rx_cmp + 1);
	}

	if (!((rx_cmp_hi->errors_v2 & RX_PKT_CMPL_V2) ^ completion_bit))
		rx_process(bp, rx_cmp, rx_cmp_hi);

	return NO_MORE_CQ_BD_TO_SERVICE;
}

static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_ver_get_input);
	struct hwrm_ver_get_input *req;
	struct hwrm_ver_get_output *resp;
	int rc;

	req = (struct hwrm_ver_get_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_ver_get_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_VER_GET, cmd_len);
	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req->hwrm_intf_min = HWRM_VERSION_MINOR;
	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
	rc = wait_resp(bp, HWRM_CMD_DEFAULT_TIMEOUT, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	bp->hwrm_spec_code =
		resp->hwrm_intf_maj_8b << 16 |
		resp->hwrm_intf_min_8b << 8 |
		resp->hwrm_intf_upd_8b;
	bp->hwrm_cmd_timeout = (u32)resp->def_req_timeout;
	if (!bp->hwrm_cmd_timeout)
		bp->hwrm_cmd_timeout = (u32)HWRM_CMD_DEFAULT_TIMEOUT;

	if (resp->hwrm_intf_maj_8b >= 1)
		bp->hwrm_max_req_len = resp->max_req_win_len;

	bp->chip_id =
		resp->chip_rev << 24 |
		resp->chip_metal << 16 |
		resp->chip_bond_id << 8 |
		resp->chip_platform_type;
	bp->chip_num = resp->chip_num;
	if ((resp->dev_caps_cfg & SHORT_CMD_SUPPORTED) &&
	    (resp->dev_caps_cfg & SHORT_CMD_REQUIRED))
		FLAG_SET(bp->flags, BNXT_FLAG_HWRM_SHORT_CMD_SUPP);

	bp->hwrm_max_ext_req_len = resp->max_ext_req_len;
	bp->fw_maj = resp->hwrm_fw_maj_8b;
	bp->fw_min = resp->hwrm_fw_min_8b;
	bp->fw_bld = resp->hwrm_fw_bld_8b;
	bp->fw_rsvd = resp->hwrm_fw_rsvd_8b;
	print_fw_ver(resp, bp->hwrm_cmd_timeout);

	return STATUS_SUCCESS;
}

/* Broadcom ethernet driver Function HW cmds APIs. */
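/*
 * Every wrapper below follows the same template: overlay the
 * command-specific input/output structures on the shared request and
 * response buffers, stamp the common header with hwrm_init(), fill in
 * the command fields and block in wait_resp().  Roughly (HWRM_XYZ and
 * some_field are placeholders, not a real command):
 *
 *	req = (struct hwrm_xyz_input *)bp->hwrm_addr_req;
 *	hwrm_init(bp, (void *)req, (u16)HWRM_XYZ, cmd_len);
 *	req->some_field = some_value;
 *	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
 */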
static int bnxt_hwrm_func_resource_qcaps(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_func_resource_qcaps_input);
	struct hwrm_func_resource_qcaps_input *req;
	struct hwrm_func_resource_qcaps_output *resp;
	int rc;

	req = (struct hwrm_func_resource_qcaps_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_func_resource_qcaps_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_RESOURCE_QCAPS, cmd_len);
	req->fid = (u16)HWRM_NA_SIGNATURE;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	/* Older firmware may not support this query; treat it as non-fatal. */
	if (rc != STATUS_SUCCESS)
		return STATUS_SUCCESS;

	FLAG_SET(bp->flags, BNXT_FLAG_RESOURCE_QCAPS_SUPPORT);
	/* VFs */
	bp->max_vfs = resp->max_vfs;
	bp->vf_res_strategy = resp->vf_reservation_strategy;
	/* vNICs */
	bp->min_vnics = resp->min_vnics;
	bp->max_vnics = resp->max_vnics;
	/* MSI-X */
	bp->max_msix = resp->max_msix;
	/* Ring Groups */
	bp->min_hw_ring_grps = resp->min_hw_ring_grps;
	bp->max_hw_ring_grps = resp->max_hw_ring_grps;
	/* TX Rings */
	bp->min_tx_rings = resp->min_tx_rings;
	bp->max_tx_rings = resp->max_tx_rings;
	/* RX Rings */
	bp->min_rx_rings = resp->min_rx_rings;
	bp->max_rx_rings = resp->max_rx_rings;
	/* Completion Rings */
	bp->min_cp_rings = resp->min_cmpl_rings;
	bp->max_cp_rings = resp->max_cmpl_rings;
	/* RSS Contexts */
	bp->min_rsscos_ctxs = resp->min_rsscos_ctx;
	bp->max_rsscos_ctxs = resp->max_rsscos_ctx;
	/* L2 Contexts */
	bp->min_l2_ctxs = resp->min_l2_ctxs;
	bp->max_l2_ctxs = resp->max_l2_ctxs;
	/* Statistic Contexts */
	bp->min_stat_ctxs = resp->min_stat_ctx;
	bp->max_stat_ctxs = resp->max_stat_ctx;
	dbg_func_resource_qcaps(bp);

	return STATUS_SUCCESS;
}

static u32 set_ring_info(struct bnxt *bp)
{
	u32 enables = 0;

	/* Start from the driver defaults, then prefer the firmware
	 * minimums whenever they fit within those defaults.
	 */
	bp->num_cmpl_rings = DEFAULT_NUMBER_OF_CMPL_RINGS;
	bp->num_tx_rings = DEFAULT_NUMBER_OF_TX_RINGS;
	bp->num_rx_rings = DEFAULT_NUMBER_OF_RX_RINGS;
	bp->num_hw_ring_grps = DEFAULT_NUMBER_OF_RING_GRPS;
	bp->num_stat_ctxs = DEFAULT_NUMBER_OF_STAT_CTXS;
	if (bp->min_cp_rings <= DEFAULT_NUMBER_OF_CMPL_RINGS)
		bp->num_cmpl_rings = bp->min_cp_rings;

	if (bp->min_tx_rings <= DEFAULT_NUMBER_OF_TX_RINGS)
		bp->num_tx_rings = bp->min_tx_rings;

	if (bp->min_rx_rings <= DEFAULT_NUMBER_OF_RX_RINGS)
		bp->num_rx_rings = bp->min_rx_rings;

	if (bp->min_hw_ring_grps <= DEFAULT_NUMBER_OF_RING_GRPS)
		bp->num_hw_ring_grps = bp->min_hw_ring_grps;

	if (bp->min_stat_ctxs <= DEFAULT_NUMBER_OF_STAT_CTXS)
		bp->num_stat_ctxs = bp->min_stat_ctxs;

	print_num_rings(bp);
	enables = (FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
		   FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
		   FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
		   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
		   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	return enables;
}

static void bnxt_hwrm_assign_resources(struct bnxt *bp)
{
	struct hwrm_func_cfg_input *req;
	u32 enables = 0;

	if (FLAG_TEST(bp->flags, BNXT_FLAG_RESOURCE_QCAPS_SUPPORT))
		enables = set_ring_info(bp);

	req = (struct hwrm_func_cfg_input *)bp->hwrm_addr_req;
	req->num_cmpl_rings = bp->num_cmpl_rings;
	req->num_tx_rings = bp->num_tx_rings;
	req->num_rx_rings = bp->num_rx_rings;
	req->num_stat_ctxs = bp->num_stat_ctxs;
	req->num_hw_ring_grps = bp->num_hw_ring_grps;
	req->enables = enables;
}
int bnxt_hwrm_nvm_flush(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_nvm_flush_input);
	struct hwrm_nvm_flush_input *req;
	int rc;

	req = (struct hwrm_nvm_flush_input *)bp->hwrm_addr_req;

	hwrm_init(bp, (void *)req, (u16)HWRM_NVM_FLUSH, cmd_len);

	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_func_qcaps_req(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_func_qcaps_input);
	struct hwrm_func_qcaps_input *req;
	struct hwrm_func_qcaps_output *resp;
	int rc;

	req = (struct hwrm_func_qcaps_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_func_qcaps_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_QCAPS, cmd_len);
	req->fid = (u16)HWRM_NA_SIGNATURE;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	bp->fid = resp->fid;
	bp->port_idx = (u8)resp->port_id;

	/* Get MAC address for this PF */
	memcpy(&bp->mac_addr[0], &resp->mac_address[0], ETH_ALEN);

	memcpy(&bp->mac_set[0], &bp->mac_addr[0], ETH_ALEN);

	print_func_qcaps(bp);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_func_qcfg_req(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_func_qcfg_input);
	struct hwrm_func_qcfg_input *req;
	struct hwrm_func_qcfg_output *resp;
	int rc;

	req = (struct hwrm_func_qcfg_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_func_qcfg_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_QCFG, cmd_len);
	req->fid = (u16)HWRM_NA_SIGNATURE;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	if (resp->flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)
		FLAG_SET(bp->flags, BNXT_FLAG_MULTI_HOST);

	if (resp->port_partition_type &
	    FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0)
		FLAG_SET(bp->flags, BNXT_FLAG_NPAR_MODE);

	bp->ordinal_value = (u8)resp->pci_id & 0x0F;
	bp->stat_ctx_id = resp->stat_ctx_id;
	memcpy(&bp->mac_addr[0], &resp->mac_address[0], ETH_ALEN);
	print_func_qcfg(bp);
	dbg_flags(__func__, bp->flags);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_func_reset_req(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_func_reset_input);
	struct hwrm_func_reset_input *req;

	req = (struct hwrm_func_reset_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_RESET, cmd_len);
	req->func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME;

	return wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
}

static int bnxt_hwrm_func_cfg_req(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_func_cfg_input);
	struct hwrm_func_cfg_input *req;

	req = (struct hwrm_func_cfg_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_CFG, cmd_len);
	req->fid = (u16)HWRM_NA_SIGNATURE;
	bnxt_hwrm_assign_resources(bp);

	return wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
}

static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_func_drv_rgtr_input);
	struct hwrm_func_drv_rgtr_input *req;
	int rc;

	req = (struct hwrm_func_drv_rgtr_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_DRV_RGTR, cmd_len);
	/* Register with HWRM */
	req->enables = FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
		       FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD |
		       FUNC_DRV_RGTR_REQ_ENABLES_VER;
	/* Bit 0 requests forwarding of link-status-change async events,
	 * which bnxt_link_evt() consumes later.
	 */
	req->async_event_fwd[0] |= 0x01;
	req->os_type = FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER;
	req->ver_maj = DRIVER_VERSION_MAJOR;
	req->ver_min = DRIVER_VERSION_MINOR;
	req->ver_upd = DRIVER_VERSION_UPDATE;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_SET(bp->flag_hwrm, VALID_DRIVER_REG);

	return STATUS_SUCCESS;
}
static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_func_drv_unrgtr_input);
	struct hwrm_func_drv_unrgtr_input *req;
	int rc;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_DRIVER_REG)))
		return STATUS_SUCCESS;

	req = (struct hwrm_func_drv_unrgtr_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_DRV_UNRGTR, cmd_len);
	req->flags = FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_RESET(bp->flag_hwrm, VALID_DRIVER_REG);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_cfa_l2_filter_alloc(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req;
	struct hwrm_cfa_l2_filter_alloc_output *resp;
	int rc;
	u32 flags = CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX;
	u32 enables;

	req = (struct hwrm_cfa_l2_filter_alloc_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_cfa_l2_filter_alloc_output *)bp->hwrm_addr_resp;
	enables = CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
		  CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
		  CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK;

	hwrm_init(bp, (void *)req, (u16)HWRM_CFA_L2_FILTER_ALLOC, cmd_len);
	req->flags = flags;
	req->enables = enables;
	memcpy((char *)&req->l2_addr[0], (char *)&bp->mac_set[0], ETH_ALEN);
	memset((char *)&req->l2_addr_mask[0], 0xff, ETH_ALEN);
	memcpy((char *)&req->t_l2_addr[0], (char *)&bp->mac_set[0], ETH_ALEN);
	memset((char *)&req->t_l2_addr_mask[0], 0xff, ETH_ALEN);
	req->src_type = CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT;
	req->src_id = (u32)bp->port_idx;
	req->dst_id = bp->vnic_id;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_SET(bp->flag_hwrm, VALID_L2_FILTER);
	bp->l2_filter_id = resp->l2_filter_id;

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_cfa_l2_filter_free(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_cfa_l2_filter_free_input);
	struct hwrm_cfa_l2_filter_free_input *req;
	int rc;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_L2_FILTER)))
		return STATUS_SUCCESS;

	req = (struct hwrm_cfa_l2_filter_free_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_CFA_L2_FILTER_FREE, cmd_len);
	req->l2_filter_id = bp->l2_filter_id;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_RESET(bp->flag_hwrm, VALID_L2_FILTER);

	return STATUS_SUCCESS;
}

u32 bnxt_set_rx_mask(u32 rx_mask)
{
	u32 mask = 0;

	if (!rx_mask)
		return mask;

	mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
	if (rx_mask != RX_MASK_ACCEPT_NONE) {
		if (rx_mask & RX_MASK_ACCEPT_MULTICAST)
			mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;

		if (rx_mask & RX_MASK_ACCEPT_ALL_MULTICAST)
			mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;

		if (rx_mask & RX_MASK_PROMISCUOUS_MODE)
			mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	}

	return mask;
}
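/*
 * Broadcast is always accepted once any RX mask is requested; the
 * multicast, all-multicast and promiscuous bits translate 1:1 into the
 * corresponding CFA mask bits consumed by HWRM_CFA_L2_SET_RX_MASK
 * below.
 */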
static int bnxt_hwrm_set_rx_mask(struct bnxt *bp, u32 rx_mask)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_cfa_l2_set_rx_mask_input);
	struct hwrm_cfa_l2_set_rx_mask_input *req;
	u32 mask = bnxt_set_rx_mask(rx_mask);

	req = (struct hwrm_cfa_l2_set_rx_mask_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_CFA_L2_SET_RX_MASK, cmd_len);
	req->vnic_id = bp->vnic_id;
	req->mask = mask;

	return wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
}

static int bnxt_hwrm_port_mac_cfg(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_port_mac_cfg_input);
	struct hwrm_port_mac_cfg_input *req;

	req = (struct hwrm_port_mac_cfg_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_PORT_MAC_CFG, cmd_len);
	req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;

	return wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, u16 idx)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_port_phy_qcfg_input);
	struct hwrm_port_phy_qcfg_input *req;
	struct hwrm_port_phy_qcfg_output *resp;
	int rc;

	req = (struct hwrm_port_phy_qcfg_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_port_phy_qcfg_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_PORT_PHY_QCFG, cmd_len);
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	if (idx & SUPPORT_SPEEDS)
		bp->support_speeds = resp->support_speeds;

	if (idx & DETECT_MEDIA)
		bp->media_detect = resp->module_status;

	if (idx & PHY_SPEED)
		bp->current_link_speed = resp->link_speed;

	if (idx & PHY_STATUS) {
		if (resp->link == PORT_PHY_QCFG_RESP_LINK_LINK)
			bp->link_status = STATUS_LINK_ACTIVE;
		else
			bp->link_status = STATUS_LINK_DOWN;
	}

	return STATUS_SUCCESS;
}

u16 set_link_speed_mask(u16 link_cap)
{
	u16 speed_mask = 0;

	if (link_cap & SPEED_CAPABILITY_DRV_100M)
		speed_mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB;

	if (link_cap & SPEED_CAPABILITY_DRV_1G)
		speed_mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB;

	if (link_cap & SPEED_CAPABILITY_DRV_10G)
		speed_mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB;

	if (link_cap & SPEED_CAPABILITY_DRV_25G)
		speed_mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB;

	if (link_cap & SPEED_CAPABILITY_DRV_40G)
		speed_mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB;

	if (link_cap & SPEED_CAPABILITY_DRV_50G)
		speed_mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB;

	if (link_cap & SPEED_CAPABILITY_DRV_100G)
		speed_mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB;

	return speed_mask;
}
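/*
 * Link configuration picks one of two modes: a fixed speed from
 * bp->medium forces the link with PORT_PHY_CFG_REQ_FLAGS_FORCE, while
 * anything else falls back to autonegotiation across all speeds the
 * PHY reported in support_speeds, with pause frames enabled in both
 * directions.
 */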
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_port_phy_cfg_input);
	struct hwrm_port_phy_cfg_input *req;
	u32 flags;
	u32 enables = 0;
	u16 force_link_speed = 0;
	u16 auto_link_speed_mask = 0;
	u8 auto_mode = 0;
	u8 auto_pause = 0;
	u8 auto_duplex = 0;

	/*
	 * Do not issue hwrm_port_phy_cfg if the function is in
	 * multi-host or NPAR mode; the port is shared in those cases.
	 */
	if (FLAG_TEST(bp->flags, PORT_PHY_FLAGS)) {
		dbg_flags(__func__, bp->flags);
		return STATUS_SUCCESS;
	}

	req = (struct hwrm_port_phy_cfg_input *)bp->hwrm_addr_req;
	flags = PORT_PHY_CFG_REQ_FLAGS_FORCE |
		PORT_PHY_CFG_REQ_FLAGS_RESET_PHY;

	switch (GET_MEDIUM_SPEED(bp->medium)) {
	case MEDIUM_SPEED_1000MBPS:
		force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
		break;
	case MEDIUM_SPEED_10GBPS:
		force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
		break;
	case MEDIUM_SPEED_25GBPS:
		force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
		break;
	case MEDIUM_SPEED_40GBPS:
		force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
		break;
	case MEDIUM_SPEED_50GBPS:
		force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
		break;
	case MEDIUM_SPEED_100GBPS:
		force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
		break;
	default:
		/* Enable AUTONEG by default */
		auto_mode = PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
		flags &= ~PORT_PHY_CFG_REQ_FLAGS_FORCE;
		enables |= PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE |
			   PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK |
			   PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX |
			   PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE;
		auto_pause = PORT_PHY_CFG_REQ_AUTO_PAUSE_TX |
			     PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
		auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH;
		auto_link_speed_mask = bp->support_speeds;
		break;
	}

	hwrm_init(bp, (void *)req, (u16)HWRM_PORT_PHY_CFG, cmd_len);
	req->flags = flags;
	req->enables = enables;
	req->port_id = bp->port_idx;
	req->force_link_speed = force_link_speed;
	req->auto_mode = auto_mode;
	req->auto_duplex = auto_duplex;
	req->auto_pause = auto_pause;
	req->auto_link_speed_mask = auto_link_speed_mask;

	return wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
}

static int bnxt_qphy_link(struct bnxt *bp)
{
	u16 flag = QCFG_PHY_ALL;

	/* Query Link Status */
	if (bnxt_hwrm_port_phy_qcfg(bp, flag) != STATUS_SUCCESS)
		return STATUS_FAILURE;

	if (bp->link_status != STATUS_LINK_ACTIVE) {
		/*
		 * Try to bring the link up, but do not treat a
		 * port_phy_cfg() failure as fatal.
		 */
		bnxt_hwrm_port_phy_cfg(bp);
		/* Refresh link speed values after bringing link up. */
		if (bnxt_hwrm_port_phy_qcfg(bp, flag) != STATUS_SUCCESS)
			return STATUS_FAILURE;
	}

	return STATUS_SUCCESS;
}
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_stat_ctx_alloc_input);
	struct hwrm_stat_ctx_alloc_input *req;
	struct hwrm_stat_ctx_alloc_output *resp;
	int rc;

	req = (struct hwrm_stat_ctx_alloc_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_stat_ctx_alloc_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_STAT_CTX_ALLOC, cmd_len);
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_SET(bp->flag_hwrm, VALID_STAT_CTX);
	bp->stat_ctx_id = (u16)resp->stat_ctx_id;

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_stat_ctx_free_input);
	struct hwrm_stat_ctx_free_input *req;
	int rc;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_STAT_CTX)))
		return STATUS_SUCCESS;

	req = (struct hwrm_stat_ctx_free_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_STAT_CTX_FREE, cmd_len);
	req->stat_ctx_id = (u32)bp->stat_ctx_id;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_RESET(bp->flag_hwrm, VALID_STAT_CTX);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_ring_free_grp(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_ring_grp_free_input);
	struct hwrm_ring_grp_free_input *req;
	int rc;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_RING_GRP)))
		return STATUS_SUCCESS;

	req = (struct hwrm_ring_grp_free_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_RING_GRP_FREE, cmd_len);
	req->ring_group_id = (u32)bp->ring_grp_id;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_RESET(bp->flag_hwrm, VALID_RING_GRP);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_ring_alloc_grp(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_ring_grp_alloc_input);
	struct hwrm_ring_grp_alloc_input *req;
	struct hwrm_ring_grp_alloc_output *resp;
	int rc;

	req = (struct hwrm_ring_grp_alloc_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_ring_grp_alloc_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_RING_GRP_ALLOC, cmd_len);
	req->cr = bp->cq_ring_id;
	req->rr = bp->rx_ring_id;
	req->ar = (u16)HWRM_NA_SIGNATURE;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_SET(bp->flag_hwrm, VALID_RING_GRP);
	bp->ring_grp_id = (u16)resp->ring_group_id;

	return STATUS_SUCCESS;
}

int bnxt_hwrm_ring_free(struct bnxt *bp, u16 ring_id, u8 ring_type)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_ring_free_input);
	struct hwrm_ring_free_input *req;

	req = (struct hwrm_ring_free_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_RING_FREE, cmd_len);
	req->ring_type = ring_type;
	req->ring_id = ring_id;

	return wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
}
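/*
 * Ring allocation order matters: the completion ring must exist first
 * because its id is passed as cmpl_ring_id when the TX and RX rings
 * are created, and the ring group then ties the RX and completion
 * rings together for the VNIC's default group.
 */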
static int bnxt_hwrm_ring_alloc(struct bnxt *bp,
				dma_addr_t ring_map,
				u16 length,
				u16 ring_id,
				u8 ring_type,
				u8 int_mode)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_ring_alloc_input);
	struct hwrm_ring_alloc_input *req;
	struct hwrm_ring_alloc_output *resp;
	int rc;

	req = (struct hwrm_ring_alloc_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_ring_alloc_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_RING_ALLOC, cmd_len);
	req->ring_type = ring_type;
	req->page_tbl_addr = ring_map;
	req->page_size = LM_PAGE_SIZE;
	req->length = (u32)length;
	req->cmpl_ring_id = ring_id;
	req->int_mode = int_mode;
	if (ring_type == RING_ALLOC_REQ_RING_TYPE_TX) {
		req->queue_id = TX_RING_QID;
	} else if (ring_type == RING_ALLOC_REQ_RING_TYPE_RX) {
		req->queue_id = RX_RING_QID;
		req->enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID;
		req->rx_buf_size = MAX_ETHERNET_PACKET_BUFFER_SIZE;
	}

	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	if (ring_type == RING_ALLOC_REQ_RING_TYPE_L2_CMPL) {
		FLAG_SET(bp->flag_hwrm, VALID_RING_CQ);
		bp->cq_ring_id = resp->ring_id;
	} else if (ring_type == RING_ALLOC_REQ_RING_TYPE_TX) {
		FLAG_SET(bp->flag_hwrm, VALID_RING_TX);
		bp->tx_ring_id = resp->ring_id;
	} else if (ring_type == RING_ALLOC_REQ_RING_TYPE_RX) {
		FLAG_SET(bp->flag_hwrm, VALID_RING_RX);
		bp->rx_ring_id = resp->ring_id;
	}

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_ring_alloc_cq(struct bnxt *bp)
{
	return bnxt_hwrm_ring_alloc(bp,
				    virt_to_bus(bp->cq.bd_virt),
				    bp->cq.ring_cnt,
				    0,
				    RING_ALLOC_REQ_RING_TYPE_L2_CMPL,
				    BNXT_CQ_INTR_MODE());
}

static int bnxt_hwrm_ring_alloc_tx(struct bnxt *bp)
{
	return bnxt_hwrm_ring_alloc(bp,
				    virt_to_bus(bp->tx.bd_virt),
				    bp->tx.ring_cnt, bp->cq_ring_id,
				    RING_ALLOC_REQ_RING_TYPE_TX,
				    BNXT_INTR_MODE());
}

static int bnxt_hwrm_ring_alloc_rx(struct bnxt *bp)
{
	return bnxt_hwrm_ring_alloc(bp,
				    virt_to_bus(bp->rx.bd_virt),
				    bp->rx.ring_cnt,
				    bp->cq_ring_id,
				    RING_ALLOC_REQ_RING_TYPE_RX,
				    BNXT_INTR_MODE());
}

static int bnxt_hwrm_ring_free_cq(struct bnxt *bp)
{
	int ret = STATUS_SUCCESS;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_RING_CQ)))
		return ret;

	ret = RING_FREE(bp, bp->cq_ring_id, RING_FREE_REQ_RING_TYPE_L2_CMPL);
	if (ret == STATUS_SUCCESS)
		FLAG_RESET(bp->flag_hwrm, VALID_RING_CQ);

	return ret;
}

static int bnxt_hwrm_ring_free_tx(struct bnxt *bp)
{
	int ret = STATUS_SUCCESS;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_RING_TX)))
		return ret;

	ret = RING_FREE(bp, bp->tx_ring_id, RING_FREE_REQ_RING_TYPE_TX);
	if (ret == STATUS_SUCCESS)
		FLAG_RESET(bp->flag_hwrm, VALID_RING_TX);

	return ret;
}

static int bnxt_hwrm_ring_free_rx(struct bnxt *bp)
{
	int ret = STATUS_SUCCESS;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_RING_RX)))
		return ret;

	ret = RING_FREE(bp, bp->rx_ring_id, RING_FREE_REQ_RING_TYPE_RX);
	if (ret == STATUS_SUCCESS)
		FLAG_RESET(bp->flag_hwrm, VALID_RING_RX);

	return ret;
}
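/*
 * The VNIC is the receive-side endpoint: it is allocated as the
 * function's default VNIC, configured with the ring group and an MRU
 * equal to the RX buffer size, and finally pointed at our MAC through
 * the L2 filter (dst_id == vnic_id) so unicast frames land in the RX
 * ring.
 */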
static int bnxt_hwrm_vnic_alloc(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_vnic_alloc_input);
	struct hwrm_vnic_alloc_input *req;
	struct hwrm_vnic_alloc_output *resp;
	int rc;

	req = (struct hwrm_vnic_alloc_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_vnic_alloc_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_VNIC_ALLOC, cmd_len);
	req->flags = VNIC_ALLOC_REQ_FLAGS_DEFAULT;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_SET(bp->flag_hwrm, VALID_VNIC_ID);
	bp->vnic_id = resp->vnic_id;

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_vnic_free(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_vnic_free_input);
	struct hwrm_vnic_free_input *req;
	int rc;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_VNIC_ID)))
		return STATUS_SUCCESS;

	req = (struct hwrm_vnic_free_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_VNIC_FREE, cmd_len);
	req->vnic_id = bp->vnic_id;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_RESET(bp->flag_hwrm, VALID_VNIC_ID);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_vnic_cfg(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_vnic_cfg_input);
	struct hwrm_vnic_cfg_input *req;

	req = (struct hwrm_vnic_cfg_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_VNIC_CFG, cmd_len);
	req->enables = VNIC_CFG_REQ_ENABLES_MRU;
	req->mru = bp->mtu;
	req->enables |= VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP;
	req->dflt_ring_grp = bp->ring_grp_id;
	req->vnic_id = bp->vnic_id;

	return wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
}

static int set_phy_speed(struct bnxt *bp)
{
	char name[20];
	u16 flag = PHY_STATUS | PHY_SPEED | DETECT_MEDIA;

	/* Query Link Status */
	if (bnxt_hwrm_port_phy_qcfg(bp, flag) != STATUS_SUCCESS)
		return STATUS_FAILURE;

	switch (bp->current_link_speed) {
	case PORT_PHY_QCFG_RESP_LINK_SPEED_100GB:
		sprintf(name, "%s %s", str_100, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_50GB:
		sprintf(name, "%s %s", str_50, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_40GB:
		sprintf(name, "%s %s", str_40, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_25GB:
		sprintf(name, "%s %s", str_25, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_20GB:
		sprintf(name, "%s %s", str_20, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_10GB:
		sprintf(name, "%s %s", str_10, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB:
		sprintf(name, "%s %s", str_2_5, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_2GB:
		sprintf(name, "%s %s", str_2, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_1GB:
		sprintf(name, "%s %s", str_1, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_100MB:
		sprintf(name, "%s %s", str_100, str_mbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_10MB:
		sprintf(name, "%s %s", str_10, str_mbps);
		break;
	default:
		sprintf(name, "%s %x", str_unknown, bp->current_link_speed);
	}

	dbg_phy_speed(bp, name);

	return STATUS_SUCCESS;
}
static int set_phy_link(struct bnxt *bp, u32 tmo)
{
	int ret;

	set_phy_speed(bp);
	dbg_link_status(bp);
	ret = STATUS_FAILURE;
	if (bp->link_status == STATUS_LINK_ACTIVE) {
		dbg_link_state(bp, tmo);
		ret = STATUS_SUCCESS;
	}

	return ret;
}

static int get_phy_link(struct bnxt *bp)
{
	u16 flag = PHY_STATUS | PHY_SPEED | DETECT_MEDIA;

	dbg_chip_info(bp);
	/* Query Link Status */
	if (bnxt_hwrm_port_phy_qcfg(bp, flag) != STATUS_SUCCESS)
		return STATUS_FAILURE;

	set_phy_link(bp, 100);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_set_async_event(struct bnxt *bp)
{
	int rc;
	u16 cmd_len = (u16)sizeof(struct hwrm_func_cfg_input);
	struct hwrm_func_cfg_input *req;

	req = (struct hwrm_func_cfg_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_CFG, cmd_len);
	req->fid = (u16)HWRM_NA_SIGNATURE;
	req->enables = FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR;
	req->async_event_cr = bp->cq_ring_id;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);

	return rc;
}

int bnxt_hwrm_get_nvmem(struct bnxt *bp,
			u16 data_len,
			u16 option_num,
			u16 dimensions,
			u16 index_0)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_nvm_get_variable_input);
	struct hwrm_nvm_get_variable_input *req;

	req = (struct hwrm_nvm_get_variable_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_NVM_GET_VARIABLE, cmd_len);
	req->dest_data_addr = bp->data_addr_mapping;
	req->data_len = data_len;
	req->option_num = option_num;
	req->dimensions = dimensions;
	req->index_0 = index_0;

	return wait_resp(bp,
			 HWRM_CMD_FLASH_MULTIPLAYER(bp->hwrm_cmd_timeout),
			 cmd_len,
			 __func__);
}

static void set_medium(struct bnxt *bp)
{
	switch (bp->link_set & LINK_SPEED_DRV_MASK) {
	case LINK_SPEED_DRV_1G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_1000MBPS);
		break;
	case LINK_SPEED_DRV_2_5G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_2500MBPS);
		break;
	case LINK_SPEED_DRV_10G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_10GBPS);
		break;
	case LINK_SPEED_DRV_25G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_25GBPS);
		break;
	case LINK_SPEED_DRV_40G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_40GBPS);
		break;
	case LINK_SPEED_DRV_50G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_50GBPS);
		break;
	case LINK_SPEED_DRV_100G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_100GBPS);
		break;
	case LINK_SPEED_DRV_200G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_200GBPS);
		break;
	case LINK_SPEED_DRV_AUTONEG:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_AUTONEG);
		break;
	default:
		bp->medium = SET_MEDIUM_DUPLEX(bp, MEDIUM_FULL_DUPLEX);
		break;
	}
}

static int bnxt_hwrm_get_link_speed(struct bnxt *bp)
{
	u32 *ptr32 = (u32 *)bp->hwrm_addr_data;

	if (bnxt_hwrm_get_nvmem(bp,
				4,
				(u16)LINK_SPEED_DRV_NUM,
				1,
				(u16)bp->port_idx) != STATUS_SUCCESS)
		return STATUS_FAILURE;

	bp->link_set = *ptr32;
	bp->link_set &= SPEED_DRV_MASK;
	set_medium(bp);

	return STATUS_SUCCESS;
}

typedef int (*hwrm_func_t)(struct bnxt *bp);
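/*
 * Chip bring-up and teardown are table driven: each entry is one HWRM
 * step, executed in order by bnxt_hwrm_run().  down_chip releases
 * resources in roughly the reverse order of their allocation in
 * bring_chip, and each free routine checks its VALID_* flag so a
 * partial bring-up can still be torn down safely.
 */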
hwrm_func_t down_chip[] = {
	bnxt_hwrm_cfa_l2_filter_free,	/* Free l2 filter  */
	bnxt_free_rx_iob,		/* Free rx iob     */
	bnxt_hwrm_vnic_free,		/* Free vnic       */
	bnxt_hwrm_ring_free_grp,	/* Free ring group */
	bnxt_hwrm_ring_free_rx,		/* Free rx ring    */
	bnxt_hwrm_ring_free_tx,		/* Free tx ring    */
	bnxt_hwrm_ring_free_cq,		/* Free CQ ring    */
	bnxt_hwrm_stat_ctx_free,	/* Free Stat ctx   */
	bnxt_hwrm_func_drv_unrgtr,	/* unreg driver    */
	NULL,
};

hwrm_func_t bring_chip[] = {
	bnxt_hwrm_ver_get,		/* HWRM_VER_GET                 */
	bnxt_hwrm_func_reset_req,	/* HWRM_FUNC_RESET              */
	bnxt_hwrm_func_drv_rgtr,	/* HWRM_FUNC_DRV_RGTR           */
	bnxt_hwrm_func_resource_qcaps,	/* HWRM_FUNC_RESOURCE_QCAPS     */
	bnxt_hwrm_func_qcfg_req,	/* HWRM_FUNC_QCFG               */
	bnxt_hwrm_func_qcaps_req,	/* HWRM_FUNC_QCAPS              */
	bnxt_hwrm_get_link_speed,	/* HWRM_NVM_GET_VARIABLE - 203  */
	bnxt_hwrm_port_mac_cfg,		/* HWRM_PORT_MAC_CFG            */
	bnxt_qphy_link,			/* HWRM_PORT_PHY_QCFG           */
	bnxt_hwrm_func_cfg_req,		/* HWRM_FUNC_CFG - ring resource*/
	bnxt_hwrm_stat_ctx_alloc,	/* Allocate Stat Ctx ID         */
	bnxt_hwrm_ring_alloc_cq,	/* Allocate CQ Ring             */
	bnxt_hwrm_ring_alloc_tx,	/* Allocate Tx ring             */
	bnxt_hwrm_ring_alloc_rx,	/* Allocate Rx Ring             */
	bnxt_hwrm_ring_alloc_grp,	/* Create Ring Group            */
	post_rx_buffers,		/* Post RX buffers              */
	bnxt_hwrm_set_async_event,	/* ENABLES_ASYNC_EVENT_CR       */
	bnxt_hwrm_vnic_alloc,		/* Alloc VNIC                   */
	bnxt_hwrm_vnic_cfg,		/* Config VNIC                  */
	bnxt_hwrm_cfa_l2_filter_alloc,	/* Alloc L2 Filter              */
	get_phy_link,			/* Get Physical Link            */
	NULL,
};

int bnxt_hwrm_run(hwrm_func_t cmds[], struct bnxt *bp, int flag)
{
	hwrm_func_t *ptr;
	int ret;
	int status = STATUS_SUCCESS;

	for (ptr = cmds; *ptr; ++ptr) {
		ret = (*ptr)(bp);
		if (ret) {
			status = STATUS_FAILURE;
			/*
			 * Bring-up (flag set) aborts on the first
			 * failure; teardown keeps going so that every
			 * cleanup routine still runs.
			 */
			if (flag)
				return STATUS_FAILURE;
		}
	}

	return status;
}

/* Broadcom ethernet driver Network interface APIs. */
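/*
 * From here down is the U-Boot DM_ETH glue.  The uclass calls start()
 * before a network operation, then loops send()/recv(), hands each
 * received buffer back through free_pkt(), and finally calls stop().
 * The heavy lifting already happened in probe via bnxt_bring_chip(),
 * so start() only has to open the RX mask.
 */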
static int bnxt_start(struct udevice *dev)
{
	struct bnxt *bp = dev_get_priv(dev);

	if (bnxt_hwrm_set_rx_mask(bp, RX_MASK) != STATUS_SUCCESS)
		return STATUS_FAILURE;

	bp->card_en = true;
	return STATUS_SUCCESS;
}

static int bnxt_send(struct udevice *dev, void *packet, int length)
{
	struct bnxt *bp = dev_get_priv(dev);
	int len;
	u16 entry;
	dma_addr_t mapping;

	if (bnxt_tx_avail(bp) < 1) {
		dbg_no_tx_bd();
		return -ENOBUFS;
	}

	entry = bp->tx.prod_id;
	len = iob_pad(packet, length);
	mapping = virt_to_bus(packet);
	set_txq(bp, entry, mapping, len);
	entry = NEXT_IDX(entry, bp->tx.ring_cnt);
	dump_tx_pkt(packet, mapping, len);
	bnxt_db_tx(bp, (u32)entry);
	bp->tx.prod_id = entry;
	bp->tx.cnt_req++;
	/* The boot driver does not poll for a TX completion; the slot is
	 * reclaimed immediately after the doorbell is rung.
	 */
	bnxt_tx_complete(bp);

	return 0;
}

static void bnxt_link_evt(struct bnxt *bp, struct cmpl_base *cmp)
{
	struct hwrm_async_event_cmpl *evt;

	evt = (struct hwrm_async_event_cmpl *)cmp;
	switch (evt->event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		if (evt->event_data1 & 0x01)
			bp->link_status = STATUS_LINK_ACTIVE;
		else
			bp->link_status = STATUS_LINK_DOWN;

		set_phy_link(bp, 0);
		break;
	default:
		break;
	}
}

static int bnxt_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct bnxt *bp = dev_get_priv(dev);
	struct cmpl_base *cmp;
	u16 old_cons_idx = bp->cq.cons_idx;
	int done = SERVICE_NEXT_CQ_BD;
	u32 cq_type;

	while (done == SERVICE_NEXT_CQ_BD) {
		cmp = (struct cmpl_base *)BD_NOW(bp->cq.bd_virt,
						 bp->cq.cons_idx,
						 sizeof(struct cmpl_base));
		/* Stop at the first entry firmware has not written yet. */
		if ((cmp->info3_v & CMPL_BASE_V) ^ bp->cq.completion_bit)
			break;

		cq_type = cmp->type & CMPL_BASE_TYPE_MASK;
		dump_evt((u8 *)cmp, cq_type, bp->cq.cons_idx);
		dump_CQ(cmp, bp->cq.cons_idx);
		switch (cq_type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_link_evt(bp, cmp);
			fallthrough;
		case CMPL_BASE_TYPE_TX_L2:
		case CMPL_BASE_TYPE_STAT_EJECT:
			bnxt_adv_cq_index(bp, 1);
			break;
		case CMPL_BASE_TYPE_RX_L2:
			done = bnxt_rx_complete(bp, (struct rx_pkt_cmpl *)cmp);
			break;
		default:
			done = NO_MORE_CQ_BD_TO_SERVICE;
			break;
		}
	}

	if (bp->cq.cons_idx != old_cons_idx)
		bnxt_db_cq(bp);

	if (bp->rx.iob_recv == PKT_RECEIVED) {
		*packetp = bp->rx.iob_rx;
		return bp->rx.iob_len;
	}

	return -EAGAIN;
}

static void bnxt_stop(struct udevice *dev)
{
	struct bnxt *bp = dev_get_priv(dev);

	if (bp->card_en) {
		bnxt_hwrm_set_rx_mask(bp, 0);
		bp->card_en = false;
	}
}

static int bnxt_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct bnxt *bp = dev_get_priv(dev);

	dbg_rx_pkt(bp, __func__, packet, length);
	bp->rx.iob_recv = PKT_DONE;
	bp->rx.iob_len = 0;
	bp->rx.iob_rx = NULL;

	return 0;
}

static int bnxt_read_rom_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_plat(dev);
	struct bnxt *bp = dev_get_priv(dev);

	memcpy(plat->enetaddr, bp->mac_set, ETH_ALEN);

	return 0;
}
static const struct eth_ops bnxt_eth_ops = {
	.start = bnxt_start,
	.send = bnxt_send,
	.recv = bnxt_recv,
	.stop = bnxt_stop,
	.free_pkt = bnxt_free_pkt,
	.read_rom_hwaddr = bnxt_read_rom_hwaddr,
};

static const struct udevice_id bnxt_eth_ids[] = {
	{ .compatible = "broadcom,nxe" },
	{ }
};

static int bnxt_eth_bind(struct udevice *dev)
{
	char name[20];

	sprintf(name, "bnxt_eth%u", dev_seq(dev));

	return device_set_name(dev, name);
}

static int bnxt_eth_probe(struct udevice *dev)
{
	struct bnxt *bp = dev_get_priv(dev);
	int ret;

	ret = bnxt_alloc_mem(bp);
	if (ret) {
		printf("*** error: bnxt_alloc_mem failed! ***\n");
		return ret;
	}

	bp->cardnum = dev_seq(dev);
	bp->name = dev->name;
	bp->pdev = (struct udevice *)dev;

	bnxt_bring_pci(bp);

	ret = bnxt_bring_chip(bp);
	if (ret) {
		printf("*** error: bnxt_bring_chip failed! ***\n");
		/* Undo any partial bring-up and release the DMA buffers. */
		bnxt_down_chip(bp);
		bnxt_free_mem(bp);
		return -ENODATA;
	}

	return 0;
}

static int bnxt_eth_remove(struct udevice *dev)
{
	struct bnxt *bp = dev_get_priv(dev);

	bnxt_down_chip(bp);
	bnxt_free_mem(bp);

	return 0;
}

static struct pci_device_id bnxt_nics[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NXT_57320)},
	{}
};

U_BOOT_DRIVER(eth_bnxt) = {
	.name = "eth_bnxt",
	.id = UCLASS_ETH,
	.of_match = bnxt_eth_ids,
	.bind = bnxt_eth_bind,
	.probe = bnxt_eth_probe,
	.remove = bnxt_eth_remove,
	.ops = &bnxt_eth_ops,
	.priv_auto = sizeof(struct bnxt),
	.plat_auto = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ACTIVE_DMA,
};

U_BOOT_PCI_DEVICE(eth_bnxt, bnxt_nics);