Lines matching refs: bp

306 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
312 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
314 static void __storm_memset_dma_mapping(struct bnx2x *bp,
317 REG_WR(bp, addr, U64_LO(mapping));
318 REG_WR(bp, addr + 4, U64_HI(mapping));
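
The REG_WR pair at 317-318 stores a 64-bit DMA address into two adjacent 32-bit registers, low half first. A minimal sketch of the U64_LO/U64_HI helpers this relies on (the real macros live in bnx2x.h; quoted here from memory, so treat as an assumption):

	/* Split a dma_addr_t into the two 32-bit halves written above. */
	#define U64_LO(x)	((u32)(((u64)(x)) & 0xffffffff))
	#define U64_HI(x)	((u32)(((u64)(x)) >> 32))
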
321 static void storm_memset_spq_addr(struct bnx2x *bp,
327 __storm_memset_dma_mapping(bp, addr, mapping);
330 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
333 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
335 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
337 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
339 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
343 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
346 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
348 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
350 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
352 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
356 static void storm_memset_eq_data(struct bnx2x *bp,
364 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
367 static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
371 REG_WR16(bp, addr, eq_prod);
377 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
379 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
380 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
381 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
385 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
389 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
390 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
391 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
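
377-391: indirect GRC access tunnels register reads and writes through the PCI configuration window. A hedged reconstruction of the read side; the final config write parks the window again (PCICFG_VENDOR_ID_OFFSET is what upstream uses there, quoted from memory):

	static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
	{
		u32 val;

		/* Aim the GRC window at the target register, pull the data
		 * register, then restore the window to a harmless offset.
		 */
		pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
		pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
		pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
				       PCICFG_VENDOR_ID_OFFSET);
		return val;
	}
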
403 static void bnx2x_dp_dmae(struct bnx2x *bp,
470 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
477 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
479 REG_WR(bp, dmae_reg_go_c[idx], 1);
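
470-479: bnx2x_post_dmae() copies the command block dword-by-dword into the device's DMAE command memory, then rings the per-channel GO register. A sketch of the whole function, assuming the usual cmd_offset computation:

	void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
	{
		u32 cmd_offset = DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx;
		int i;

		/* Copy the command into channel idx's command memory */
		for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
			REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		/* Kick the channel */
		REG_WR(bp, dmae_reg_go_c[idx], 1);
	}
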
493 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
503 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
504 opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
505 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
518 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
525 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
529 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
530 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
535 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
538 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
541 bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);
548 spin_lock_bh(&bp->dmae_lock);
554 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
561 (bp->recovery_state != BNX2X_RECOVERY_DONE &&
562 bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
577 spin_unlock_bh(&bp->dmae_lock);
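
535-577: bnx2x_issue_dmae_with_comp() posts the prepared command under dmae_lock and spins on the write-back completion word that bnx2x_prep_dmae_with_comp() pointed at wb_comp. A simplified sketch (error codes, logging and the PCI-error flag check in the completion word are trimmed; the recovery-state bail-out follows the fragments at 561-562):

	int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
				       u32 *comp)
	{
		int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
		int rc = 0;

		bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

		/* Only one outstanding DMAE command per function */
		spin_lock_bh(&bp->dmae_lock);

		*comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

		/* Wait for the hardware to write DMAE_COMP_VAL back */
		while (*comp != DMAE_COMP_VAL) {
			if (!cnt ||
			    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
			     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
				rc = -EBUSY;	/* timeout or recovery in progress */
				goto unlock;
			}
			cnt--;
			udelay(50);	/* per-iteration delay (assumed) */
		}

	unlock:
		spin_unlock_bh(&bp->dmae_lock);
		return rc;
	}
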
582 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
588 if (!bp->dmae_ready) {
589 u32 *data = bnx2x_sp(bp, wb_data[0]);
591 if (CHIP_IS_E1(bp))
592 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
594 bnx2x_init_str_wr(bp, dst_addr, data, len32);
599 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
609 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
618 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
623 if (!bp->dmae_ready) {
624 u32 *data = bnx2x_sp(bp, wb_data[0]);
627 if (CHIP_IS_E1(bp))
629 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
632 data[i] = REG_RD(bp, src_addr + i*4);
638 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
643 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
644 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
648 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
657 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
660 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
664 bnx2x_write_dmae(bp, phys_addr + offset,
670 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
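
657-670: writes longer than the per-command limit are chunked; len counts dwords, so the byte offset advances by dmae_wr_max * 4 while len drops by dmae_wr_max. Reconstructed loop:

	static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
					      u32 addr, u32 len)
	{
		int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
		int offset = 0;

		while (len > dmae_wr_max) {
			bnx2x_write_dmae(bp, phys_addr + offset,
					 addr + offset, dmae_wr_max);
			offset += dmae_wr_max * 4;	/* bytes */
			len -= dmae_wr_max;		/* dwords */
		}

		bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
	}
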
684 static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
704 static int bnx2x_mc_assert(struct bnx2x *bp)
730 last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
740 regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
741 bnx2x_get_assert_list_entry(bp,
759 CHIP_IS_E1(bp) ? "everest1" :
760 CHIP_IS_E1H(bp) ? "everest1h" :
761 CHIP_IS_E2(bp) ? "everest2" : "everest3",
762 bp->fw_major, bp->fw_minor, bp->fw_rev);
768 #define SCRATCH_BUFFER_SIZE(bp) \
769 (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
771 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
778 if (BP_NOMCP(bp)) {
782 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
783 (bp->common.bc_ver & 0xff0000) >> 16,
784 (bp->common.bc_ver & 0xff00) >> 8,
785 (bp->common.bc_ver & 0xff));
787 if (pci_channel_offline(bp->pdev)) {
792 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
793 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
796 if (BP_PATH(bp) == 0)
797 trace_shmem_base = bp->common.shmem_base;
799 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
802 if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
803 trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
804 SCRATCH_BUFFER_SIZE(bp)) {
813 mark = REG_RD(bp, addr);
821 mark = REG_RD(bp, addr);
822 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
834 data[word] = htonl(REG_RD(bp, offset + 4*word));
842 data[word] = htonl(REG_RD(bp, offset + 4*word));
849 static void bnx2x_fw_dump(struct bnx2x *bp)
851 bnx2x_fw_dump_lvl(bp, KERN_ERR);
854 static void bnx2x_hc_int_disable(struct bnx2x *bp)
856 int port = BP_PORT(bp);
858 u32 val = REG_RD(bp, addr);
864 if (CHIP_IS_E1(bp)) {
869 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
884 REG_WR(bp, addr, val);
885 if (REG_RD(bp, addr) != val)
889 static void bnx2x_igu_int_disable(struct bnx2x *bp)
891 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
899 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
900 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
904 static void bnx2x_int_disable(struct bnx2x *bp)
906 if (bp->common.int_block == INT_BLOCK_HC)
907 bnx2x_hc_int_disable(bp);
909 bnx2x_igu_int_disable(bp);
912 void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
917 int func = BP_FUNC(bp);
922 if (IS_PF(bp) && disable_int)
923 bnx2x_int_disable(bp);
925 bp->stats_state = STATS_STATE_DISABLED;
926 bp->eth_stats.unrecoverable_error++;
933 if (IS_PF(bp)) {
934 struct host_sp_status_block *def_sb = bp->def_status_blk;
938 bp->def_idx, bp->def_att_idx, bp->attn_state,
939 bp->spq_prod_idx, bp->stats_counter);
956 REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
969 for_each_eth_queue(bp, i) {
970 struct bnx2x_fastpath *fp = &bp->fp[i];
975 CHIP_IS_E1x(bp) ?
979 CHIP_IS_E1x(bp) ?
986 if (!bp->fp)
1019 loop = CHIP_IS_E1x(bp) ?
1040 if (IS_VF(bp))
1044 data_size = CHIP_IS_E1x(bp) ?
1048 sb_data_p = CHIP_IS_E1x(bp) ?
1053 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
1057 if (!CHIP_IS_E1x(bp)) {
1094 if (IS_PF(bp)) {
1096 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
1098 u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1101 i, bp->eq_ring[i].message.opcode,
1102 bp->eq_ring[i].message.error);
1110 for_each_valid_rx_queue(bp, i) {
1111 struct bnx2x_fastpath *fp = &bp->fp[i];
1113 if (!bp->fp)
1150 for_each_valid_tx_queue(bp, i) {
1151 struct bnx2x_fastpath *fp = &bp->fp[i];
1153 if (!bp->fp)
1188 if (IS_PF(bp)) {
1189 int tmp_msg_en = bp->msg_enable;
1191 bnx2x_fw_dump(bp);
1192 bp->msg_enable |= NETIF_MSG_HW;
1194 bnx2x_idle_chk(bp);
1196 bnx2x_idle_chk(bp);
1197 bp->msg_enable = tmp_msg_en;
1198 bnx2x_mc_assert(bp);
1227 static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1234 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
1235 crd = crd_start = REG_RD(bp, regs->crd);
1236 init_crd = REG_RD(bp, regs->init_crd);
1246 crd = REG_RD(bp, regs->crd);
1247 crd_freed = REG_RD(bp, regs->crd_freed);
1262 static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1269 occup = to_free = REG_RD(bp, regs->lines_occup);
1270 freed = freed_start = REG_RD(bp, regs->lines_freed);
1278 occup = REG_RD(bp, regs->lines_occup);
1279 freed = REG_RD(bp, regs->lines_freed);
1294 static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1300 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
1306 int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1309 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
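
1294-1309: FLR cleanup repeatedly reads a counter register until it shows the expected value (usually zero) or the poll budget runs out. Sketch of the two helpers, assuming FLR_WAIT_INTERVAL is a microsecond delay and the msg argument matches upstream:

	static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
					    u32 expected, u32 poll_cnt)
	{
		u32 cur_cnt = poll_cnt;
		u32 val;

		while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
			udelay(FLR_WAIT_INTERVAL);

		return val;
	}

	int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
					    char *msg, u32 poll_cnt)
	{
		u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);

		if (val != 0) {
			BNX2X_ERR("%s usage count=%d\n", msg, val);
			return 1;	/* counter never drained */
		}
		return 0;
	}
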
1318 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1321 if (CHIP_REV_IS_EMUL(bp))
1324 if (CHIP_REV_IS_FPGA(bp))
1330 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1333 {0, (CHIP_IS_E3B0(bp)) ?
1336 (CHIP_IS_E3B0(bp)) ?
1339 {1, (CHIP_IS_E3B0(bp)) ?
1342 (CHIP_IS_E3B0(bp)) ?
1345 {4, (CHIP_IS_E3B0(bp)) ?
1348 (CHIP_IS_E3B0(bp)) ?
1354 {0, (CHIP_IS_E3B0(bp)) ?
1357 (CHIP_IS_E3B0(bp)) ?
1360 (CHIP_IS_E3B0(bp)) ?
1363 {1, (CHIP_IS_E3B0(bp)) ?
1366 (CHIP_IS_E3B0(bp)) ?
1369 (CHIP_IS_E3B0(bp)) ?
1372 {4, (CHIP_IS_E3B0(bp)) ?
1375 (CHIP_IS_E3B0(bp)) ?
1378 (CHIP_IS_E3B0(bp)) ?
1387 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1391 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1403 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1409 if (REG_RD(bp, comp_addr)) {
1420 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1422 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1425 (REG_RD(bp, comp_addr)));
1430 REG_WR(bp, comp_addr, 0);
1445 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1448 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1455 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1462 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1463 QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1469 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1470 TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1474 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1475 TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1481 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1482 dmae_reg_go_c[INIT_DMAE_C(bp)],
1490 static void bnx2x_hw_enable_status(struct bnx2x *bp)
1494 val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1497 val = REG_RD(bp, PBF_REG_DISABLE_PF);
1500 val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1503 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1506 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1509 val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1512 val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1515 val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1520 static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1522 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1524 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1527 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1531 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1537 if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1543 bnx2x_tx_hw_flushed(bp, poll_cnt);
1549 if (bnx2x_is_pcie_pending(bp->pdev))
1553 bnx2x_hw_enable_status(bp);
1559 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1564 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1566 int port = BP_PORT(bp);
1568 u32 val = REG_RD(bp, addr);
1569 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1570 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1571 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1591 if (!CHIP_IS_E1(bp)) {
1595 REG_WR(bp, addr, val);
1601 if (CHIP_IS_E1(bp))
1602 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1608 REG_WR(bp, addr, val);
1614 if (!CHIP_IS_E1(bp)) {
1616 if (IS_MF(bp)) {
1617 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1618 if (bp->port.pmf)
1624 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1625 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1629 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1632 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1633 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1634 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1636 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1660 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1661 bnx2x_ack_int(bp);
1669 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1672 pci_intx(bp->pdev, true);
1677 if (IS_MF(bp)) {
1678 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1679 if (bp->port.pmf)
1685 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1686 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1689 void bnx2x_int_enable(struct bnx2x *bp)
1691 if (bp->common.int_block == INT_BLOCK_HC)
1692 bnx2x_hc_int_enable(bp);
1694 bnx2x_igu_int_enable(bp);
1697 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1699 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1704 bnx2x_int_disable(bp);
1708 synchronize_irq(bp->msix_table[0].vector);
1710 if (CNIC_SUPPORT(bp))
1712 for_each_eth_queue(bp, i)
1713 synchronize_irq(bp->msix_table[offset++].vector);
1715 synchronize_irq(bp->pdev->irq);
1718 cancel_delayed_work(&bp->sp_task);
1719 cancel_delayed_work(&bp->period_task);
1730 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1734 int func = BP_FUNC(bp);
1755 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1756 lock_status = REG_RD(bp, hw_lock_control_reg);
1768 * @bp: driver handle
1773 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1775 if (BP_PATH(bp))
1784 * @bp: driver handle
1788 static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1790 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1793 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1796 static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1802 atomic_set(&bp->interrupt_occurred, 1);
1811 return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
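
1796-1811: the slow-path task is armed by raising an atomic flag and then queueing the delayed work; the smp_wmb() makes sure the worker observes the flag before it runs. Sketch:

	static int bnx2x_schedule_sp_task(struct bnx2x *bp)
	{
		/* Mark that an interrupt (rather than e.g. a timeout)
		 * triggered the slow-path task, so it acks the IGU.
		 */
		atomic_set(&bp->interrupt_occurred, 1);

		/* Publish the flag before the work item can run */
		smp_wmb();

		return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
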
1816 struct bnx2x *bp = fp->bp;
1820 struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1824 fp->index, cid, command, bp->state,
1832 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1877 q_obj->complete_cmd(bp, q_obj, drv_cmd))
1881 * In this case we don't want to increase the bp->spq_left
1892 atomic_inc(&bp->cq_spq_left);
1893 /* push the change in bp->spq_left and towards the memory */
1896 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1899 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1910 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1912 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1916 bnx2x_schedule_sp_task(bp);
1924 struct bnx2x *bp = netdev_priv(dev_instance);
1925 u16 status = bnx2x_ack_int(bp);
1938 if (unlikely(bp->panic))
1942 for_each_eth_queue(bp, i) {
1943 struct bnx2x_fastpath *fp = &bp->fp[i];
1945 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1951 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1956 if (CNIC_SUPPORT(bp)) {
1962 c_ops = rcu_dereference(bp->cnic_ops);
1963 if (c_ops && (bp->cnic_eth_dev.drv_state &
1965 c_ops->cnic_handler(bp->cnic_data, NULL);
1977 bnx2x_schedule_sp_task(bp);
1997 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
2001 int func = BP_FUNC(bp);
2020 lock_status = REG_RD(bp, hw_lock_control_reg);
2030 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
2031 lock_status = REG_RD(bp, hw_lock_control_reg);
2041 int bnx2x_release_leader_lock(struct bnx2x *bp)
2043 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2046 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
2050 int func = BP_FUNC(bp);
2068 lock_status = REG_RD(bp, hw_lock_control_reg);
2075 REG_WR(bp, hw_lock_control_reg, resource_bit);
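
1997-2075: acquire and release share the same per-function lock register; acquire writes the resource bit to the set address (reg + 4) and reads back to check it won, release writes the bit to the base address to clear it. The register selection both paths open-code, factored into a hypothetical helper for illustration:

	/* Hypothetical helper; upstream open-codes this in each lock routine.
	 * Functions 0-5 use MISC_REG_DRIVER_CONTROL_1..6, 6-7 use 7..8.
	 */
	static u32 bnx2x_hw_lock_control_reg(struct bnx2x *bp)
	{
		int func = BP_FUNC(bp);

		if (func <= 5)
			return MISC_REG_DRIVER_CONTROL_1 + func * 8;
		return MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8;
	}
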
2079 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2082 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2083 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2096 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2107 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2110 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2111 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2122 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2124 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2157 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2158 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2163 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2170 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2172 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2203 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2205 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2210 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2213 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2214 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2225 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2227 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2252 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2253 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2258 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2268 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2270 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2297 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2298 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2303 void bnx2x_calc_fc_adv(struct bnx2x *bp)
2305 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2307 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2309 switch (bp->link_vars.ieee_fc &
2312 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2317 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2325 static void bnx2x_set_requested_fc(struct bnx2x *bp)
2331 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2332 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2334 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2337 static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2341 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2342 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2345 REG_WR(bp, BAR_USTRORM_INTMEM +
2346 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2354 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2356 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2357 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2359 if (!BP_NOMCP(bp)) {
2360 bnx2x_set_requested_fc(bp);
2361 bnx2x_acquire_phy_lock(bp);
2364 struct link_params *lp = &bp->link_params;
2383 struct link_params *lp = &bp->link_params;
2387 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2389 bnx2x_release_phy_lock(bp);
2391 bnx2x_init_dropless_fc(bp);
2393 bnx2x_calc_fc_adv(bp);
2395 if (bp->link_vars.link_up) {
2396 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2397 bnx2x_link_report(bp);
2399 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2400 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2407 void bnx2x_link_set(struct bnx2x *bp)
2409 if (!BP_NOMCP(bp)) {
2410 bnx2x_acquire_phy_lock(bp);
2411 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2412 bnx2x_release_phy_lock(bp);
2414 bnx2x_init_dropless_fc(bp);
2416 bnx2x_calc_fc_adv(bp);
2421 static void bnx2x__link_reset(struct bnx2x *bp)
2423 if (!BP_NOMCP(bp)) {
2424 bnx2x_acquire_phy_lock(bp);
2425 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2426 bnx2x_release_phy_lock(bp);
2431 void bnx2x_force_link_reset(struct bnx2x *bp)
2433 bnx2x_acquire_phy_lock(bp);
2434 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2435 bnx2x_release_phy_lock(bp);
2438 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2442 if (!BP_NOMCP(bp)) {
2443 bnx2x_acquire_phy_lock(bp);
2444 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2446 bnx2x_release_phy_lock(bp);
2462 static void bnx2x_calc_vn_min(struct bnx2x *bp,
2468 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2469 u32 vn_cfg = bp->mf_config[vn];
2486 if (BNX2X_IS_ETS_ENABLED(bp)) {
2500 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2504 u32 vn_cfg = bp->mf_config[vn];
2509 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2511 if (IS_MF_PERCENT_BW(bp)) {
2513 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2524 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2526 if (CHIP_REV_IS_SLOW(bp))
2528 if (IS_MF(bp))
2534 void bnx2x_read_mf_cfg(struct bnx2x *bp)
2536 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2538 if (BP_NOMCP(bp))
2552 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2553 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2558 bp->mf_config[vn] =
2559 MF_CFG_RD(bp, func_mf_config[func].config);
2561 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2563 bp->flags |= MF_FUNC_DIS;
2566 bp->flags &= ~MF_FUNC_DIS;
2570 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2575 input.port_rate = bp->link_vars.line_speed;
2582 bnx2x_read_mf_cfg(bp);
2585 bnx2x_calc_vn_min(bp, &input);
2588 if (bp->port.pmf)
2589 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2590 bnx2x_calc_vn_max(bp, vn, &input);
2596 bnx2x_init_cmng(&input, &bp->cmng);
2605 static void storm_memset_cmng(struct bnx2x *bp,
2615 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2617 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2618 int func = func_by_vn(bp, vn);
2623 __storm_memset_struct(bp, addr, size,
2629 __storm_memset_struct(bp, addr, size,
2635 void bnx2x_set_local_cmng(struct bnx2x *bp)
2637 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2640 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2641 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2650 static void bnx2x_link_attn(struct bnx2x *bp)
2653 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2655 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2657 bnx2x_init_dropless_fc(bp);
2659 if (bp->link_vars.link_up) {
2661 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2664 pstats = bnx2x_sp(bp, port_stats);
2669 if (bp->state == BNX2X_STATE_OPEN)
2670 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2673 if (bp->link_vars.link_up && bp->link_vars.line_speed)
2674 bnx2x_set_local_cmng(bp);
2676 __bnx2x_link_report(bp);
2678 if (IS_MF(bp))
2679 bnx2x_link_sync_notify(bp);
2682 void bnx2x__link_status_update(struct bnx2x *bp)
2684 if (bp->state != BNX2X_STATE_OPEN)
2688 if (IS_PF(bp)) {
2689 bnx2x_dcbx_pmf_update(bp);
2690 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2691 if (bp->link_vars.link_up)
2692 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2694 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2696 bnx2x_link_report(bp);
2699 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2711 bp->port.advertising[0] = bp->port.supported[0];
2713 bp->link_params.bp = bp;
2714 bp->link_params.port = BP_PORT(bp);
2715 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2716 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2717 bp->link_params.req_line_speed[0] = SPEED_10000;
2718 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2719 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2720 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2721 bp->link_vars.line_speed = SPEED_10000;
2722 bp->link_vars.link_status =
2725 bp->link_vars.link_up = 1;
2726 bp->link_vars.duplex = DUPLEX_FULL;
2727 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2728 __bnx2x_link_report(bp);
2730 bnx2x_sample_bulletin(bp);
2737 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2741 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2748 func_params.f_obj = &bp->func_obj;
2760 if (bnx2x_func_state_change(bp, &func_params) < 0)
2761 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2766 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2780 func_params.f_obj = &bp->func_obj;
2797 rc = bnx2x_func_state_change(bp, &func_params);
2799 bnx2x_fw_command(bp, drv_msg_code, 0);
2804 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2807 u32 func = BP_ABS_FUNC(bp);
2817 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2820 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2824 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2825 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2829 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2834 addr_to_write = SHMEM2_RD(bp,
2835 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2836 stats_type = SHMEM2_RD(bp,
2837 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2843 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2847 REG_WR(bp, addr_to_write + i*sizeof(u32),
2851 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2855 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2856 bp->mf_config[BP_VN(bp)] = mf_config;
2868 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2870 bp->mf_config[BP_VN(bp)] = mf_config;
2872 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2874 cmng_input.vnic_max_rate[BP_VN(bp)];
2879 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2883 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2887 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2895 (MF_CFG_RD(bp,
2900 (MF_CFG_RD(bp,
2906 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2910 bp->afex_def_vlan_tag = vlan_val;
2911 bp->afex_vlan_mode = vlan_mode;
2914 bnx2x_link_report(bp);
2917 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2920 bp->afex_def_vlan_tag = -1;
2925 static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2932 func_params.f_obj = &bp->func_obj;
2939 if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2940 int func = BP_ABS_FUNC(bp);
2944 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2947 bp->mf_ov = val;
2954 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2955 bp->mf_ov);
2960 switch_update_params->vlan = bp->mf_ov;
2962 if (bnx2x_func_state_change(bp, &func_params) < 0) {
2964 bp->mf_ov);
2968 bp->mf_ov);
2974 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2977 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2980 static void bnx2x_pmf_update(struct bnx2x *bp)
2982 int port = BP_PORT(bp);
2985 bp->port.pmf = 1;
2986 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2990 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
2995 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2997 bnx2x_dcbx_pmf_update(bp);
3000 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
3001 if (bp->common.int_block == INT_BLOCK_HC) {
3002 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
3003 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
3004 } else if (!CHIP_IS_E1x(bp)) {
3005 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
3006 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
3009 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3021 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3023 int mb_idx = BP_FW_MB_IDX(bp);
3027 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3029 mutex_lock(&bp->fw_mb_mutex);
3030 seq = ++bp->fw_seq;
3031 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3032 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3041 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
3055 bnx2x_fw_dump(bp);
3058 mutex_unlock(&bp->fw_mb_mutex);
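
3021-3058: driver-to-MCP commands go through a SHMEM mailbox guarded by fw_mb_mutex; a sequence number is folded into the command header and the driver polls fw_mb_header until the firmware echoes it back. Sketch of the handshake (the 500-iteration bound and mask names follow upstream, quoted from memory):

	u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
	{
		int mb_idx = BP_FW_MB_IDX(bp);
		u32 seq, rc, cnt = 1;
		u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

		mutex_lock(&bp->fw_mb_mutex);
		seq = ++bp->fw_seq;
		SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
		SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

		do {
			msleep(delay);	/* let the firmware respond */
			rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
		} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

		if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
			rc &= FW_MSG_CODE_MASK;	/* firmware response code */
		} else {
			BNX2X_ERR("FW failed to respond!\n");
			bnx2x_fw_dump(bp);
			rc = 0;
		}
		mutex_unlock(&bp->fw_mb_mutex);

		return rc;
	}
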
3063 static void storm_memset_func_cfg(struct bnx2x *bp,
3072 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3075 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3077 if (CHIP_IS_E1x(bp)) {
3080 storm_memset_func_cfg(bp, &tcfg, p->func_id);
3084 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3085 storm_memset_func_en(bp, p->func_id, 1);
3089 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3090 REG_WR(bp, XSEM_REG_FAST_MEMORY +
3098 * @bp: device handle
3104 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3122 if (bp->flags & TX_SWITCHING)
3135 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3142 if (IS_MF_SD(bp))
3167 if (IS_MF_AFEX(bp))
3170 return flags | bnx2x_get_common_flags(bp, fp, true);
3173 static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3184 gen_init->mtu = bp->dev->mtu;
3191 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3200 pause->sge_th_lo = SGE_TH_LO(bp);
3201 pause->sge_th_hi = SGE_TH_HI(bp);
3204 WARN_ON(bp->dropless_fc &&
3209 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3217 if (!CHIP_IS_E1(bp)) {
3218 pause->bd_th_lo = BD_TH_LO(bp);
3219 pause->bd_th_hi = BD_TH_HI(bp);
3221 pause->rcq_th_lo = RCQ_TH_LO(bp);
3222 pause->rcq_th_hi = RCQ_TH_HI(bp);
3227 WARN_ON(bp->dropless_fc &&
3229 bp->rx_ring_size);
3230 WARN_ON(bp->dropless_fc &&
3253 rxq_init->rss_engine_id = BP_FUNC(bp);
3254 rxq_init->mcast_engine_id = BP_FUNC(bp);
3261 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3273 if (IS_MF_AFEX(bp)) {
3274 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3279 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3292 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3300 static void bnx2x_pf_init(struct bnx2x *bp)
3305 if (!CHIP_IS_E1x(bp)) {
3308 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3310 (CHIP_MODE_IS_4_PORT(bp) ?
3311 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3313 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3316 (CHIP_MODE_IS_4_PORT(bp) ?
3317 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3321 func_init.pf_id = BP_FUNC(bp);
3322 func_init.func_id = BP_FUNC(bp);
3323 func_init.spq_map = bp->spq_mapping;
3324 func_init.spq_prod = bp->spq_prod_idx;
3326 bnx2x_func_init(bp, &func_init);
3328 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3336 bp->link_vars.line_speed = SPEED_10000;
3337 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3340 if (bp->port.pmf)
3341 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3344 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3345 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3346 eq_data.producer = bp->eq_prod;
3349 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3352 static void bnx2x_e1h_disable(struct bnx2x *bp)
3354 int port = BP_PORT(bp);
3356 bnx2x_tx_disable(bp);
3358 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3361 static void bnx2x_e1h_enable(struct bnx2x *bp)
3363 int port = BP_PORT(bp);
3365 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3366 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3369 netif_tx_wake_all_queues(bp->dev);
3379 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3382 &bp->slowpath->drv_info_to_mcp.ether_stat;
3384 &bp->sp_objs->mac_obj;
3401 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3405 ether_stat->mtu_size = bp->dev->mtu;
3406 if (bp->dev->features & NETIF_F_RXCSUM)
3408 if (bp->dev->features & NETIF_F_TSO)
3410 ether_stat->feature_flags |= bp->common.boot_mode;
3412 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3414 ether_stat->txq_size = bp->tx_ring_size;
3415 ether_stat->rxq_size = bp->rx_ring_size;
3418 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3422 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3424 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3426 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3428 if (!CNIC_LOADED(bp))
3431 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3437 if (!NO_FCOE(bp)) {
3439 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3443 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3447 &bp->fw_stats_data->fcoe;
3521 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3524 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3526 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3528 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3530 if (!CNIC_LOADED(bp))
3533 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3540 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3548 static void bnx2x_config_mf_bw(struct bnx2x *bp)
3554 if (!IS_MF(bp)) {
3560 if (bp->link_vars.link_up) {
3561 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3562 bnx2x_link_sync_notify(bp);
3564 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3567 static void bnx2x_set_mf_bw(struct bnx2x *bp)
3569 bnx2x_config_mf_bw(bp);
3570 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3573 static void bnx2x_handle_eee_event(struct bnx2x *bp)
3576 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3582 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3585 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3591 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3599 mutex_lock(&bp->drv_info_mutex);
3601 memset(&bp->slowpath->drv_info_to_mcp, 0,
3606 bnx2x_drv_info_ether_stat(bp);
3609 bnx2x_drv_info_fcoe_stat(bp);
3612 bnx2x_drv_info_iscsi_stat(bp);
3616 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3623 SHMEM2_WR(bp, drv_info_host_addr_lo,
3624 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3625 SHMEM2_WR(bp, drv_info_host_addr_hi,
3626 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3628 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3634 if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3636 } else if (!bp->drv_info_mng_owner) {
3637 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3640 u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3644 SHMEM2_WR(bp, mfw_drv_indication,
3655 bp->drv_info_mng_owner = true;
3659 mutex_unlock(&bp->drv_info_mutex);
3683 void bnx2x_update_mng_version(struct bnx2x *bp)
3688 int idx = BP_FW_MB_IDX(bp);
3691 if (!SHMEM2_HAS(bp, func_os_drv_ver))
3694 mutex_lock(&bp->drv_info_mutex);
3696 if (bp->drv_info_mng_owner)
3699 if (bp->state != BNX2X_STATE_OPEN)
3704 if (!CNIC_LOADED(bp))
3708 memset(&bp->slowpath->drv_info_to_mcp, 0,
3710 bnx2x_drv_info_iscsi_stat(bp);
3711 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3714 memset(&bp->slowpath->drv_info_to_mcp, 0,
3716 bnx2x_drv_info_fcoe_stat(bp);
3717 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3721 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3722 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3723 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3725 mutex_unlock(&bp->drv_info_mutex);
3731 void bnx2x_update_mfw_dump(struct bnx2x *bp)
3736 if (!SHMEM2_HAS(bp, drv_info))
3740 SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3743 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
3745 SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
3748 valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
3757 static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3781 * where the bp->flags can change so it is done without any
3784 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3786 bp->flags |= MF_FUNC_DIS;
3788 bnx2x_e1h_disable(bp);
3791 bp->flags &= ~MF_FUNC_DIS;
3793 bnx2x_e1h_enable(bp);
3801 bnx2x_config_mf_bw(bp);
3808 bnx2x_fw_command(bp, cmd_fail, 0);
3810 bnx2x_fw_command(bp, cmd_ok, 0);
3814 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3816 struct eth_spe *next_spe = bp->spq_prod_bd;
3818 if (bp->spq_prod_bd == bp->spq_last_bd) {
3819 bp->spq_prod_bd = bp->spq;
3820 bp->spq_prod_idx = 0;
3823 bp->spq_prod_bd++;
3824 bp->spq_prod_idx++;
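
3814-3824: the SPQ producer walks a linear array of BDs and wraps when it hits the last one. Reconstructed:

	static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
	{
		struct eth_spe *next_spe = bp->spq_prod_bd;

		if (bp->spq_prod_bd == bp->spq_last_bd) {
			/* wrap back to the first BD of the ring */
			bp->spq_prod_bd = bp->spq;
			bp->spq_prod_idx = 0;
		} else {
			bp->spq_prod_bd++;
			bp->spq_prod_idx++;
		}

		return next_spe;
	}
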
3830 static void bnx2x_sp_prod_update(struct bnx2x *bp)
3832 int func = BP_FUNC(bp);
3841 REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3842 bp->spq_prod_idx);
3868 * @bp: driver handle
3879 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3887 if (unlikely(bp->panic)) {
3893 spin_lock_bh(&bp->spq_lock);
3896 if (!atomic_read(&bp->eq_spq_left)) {
3898 spin_unlock_bh(&bp->spq_lock);
3902 } else if (!atomic_read(&bp->cq_spq_left)) {
3904 spin_unlock_bh(&bp->spq_lock);
3909 spe = bnx2x_sp_get_next(bp);
3914 HW_CID(bp, cid));
3923 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3940 atomic_dec(&bp->eq_spq_left);
3942 atomic_dec(&bp->cq_spq_left);
3946 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3947 (u32)(U64_LO(bp->spq_mapping) +
3948 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3949 HW_CID(bp, cid), data_hi, data_lo, type,
3950 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3952 bnx2x_sp_prod_update(bp);
3953 spin_unlock_bh(&bp->spq_lock);
3958 static int bnx2x_acquire_alr(struct bnx2x *bp)
3965 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3966 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3981 static void bnx2x_release_alr(struct bnx2x *bp)
3983 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
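
3958-3983: the MCP access lock (ALR) is taken by writing the lock bit and reading it back until the hardware reports it held; release just clears the register. Acquire sketch (retry budget and sleep range assumed):

	static int bnx2x_acquire_alr(struct bnx2x *bp)
	{
		u32 val = 0;
		int cnt = 60;	/* retry budget (assumed) */
		int rc = 0;

		might_sleep();
		while (cnt--) {
			REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
			val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
			if (val & MCPR_ACCESS_LOCK_LOCK)
				break;
			usleep_range(5000, 10000);
		}

		if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
			BNX2X_ERR("Cannot acquire MCP access lock register\n");
			rc = -EBUSY;
		}

		return rc;
	}
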
3989 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3991 struct host_sp_status_block *def_sb = bp->def_status_blk;
3995 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3996 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
4000 if (bp->def_idx != def_sb->sp_sb.running_index) {
4001 bp->def_idx = def_sb->sp_sb.running_index;
4014 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4016 int port = BP_PORT(bp);
4025 if (bp->attn_state & asserted)
4028 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4029 aeu_mask = REG_RD(bp, aeu_addr);
4036 REG_WR(bp, aeu_addr, aeu_mask);
4037 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4039 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4040 bp->attn_state |= asserted;
4041 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4046 bnx2x_acquire_phy_lock(bp);
4049 nig_mask = REG_RD(bp, nig_int_mask_addr);
4055 REG_WR(bp, nig_int_mask_addr, 0);
4057 bnx2x_link_attn(bp);
4077 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4081 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4085 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4090 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4094 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4098 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4104 if (bp->common.int_block == INT_BLOCK_HC)
4111 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4112 REG_WR(bp, reg_addr, asserted);
4119 if (bp->common.int_block != INT_BLOCK_HC) {
4122 igu_acked = REG_RD(bp,
4131 REG_WR(bp, nig_int_mask_addr, nig_mask);
4132 bnx2x_release_phy_lock(bp);
4136 static void bnx2x_fan_failure(struct bnx2x *bp)
4138 int port = BP_PORT(bp);
4142 SHMEM_RD(bp,
4147 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4151 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
4158 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4161 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4163 int port = BP_PORT(bp);
4172 val = REG_RD(bp, reg_offset);
4174 REG_WR(bp, reg_offset, val);
4179 bnx2x_hw_reset_phy(&bp->link_params);
4180 bnx2x_fan_failure(bp);
4183 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4184 bnx2x_acquire_phy_lock(bp);
4185 bnx2x_handle_module_detect_int(&bp->link_params);
4186 bnx2x_release_phy_lock(bp);
4191 val = REG_RD(bp, reg_offset);
4193 REG_WR(bp, reg_offset, val);
4201 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4207 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4216 int port = BP_PORT(bp);
4222 val = REG_RD(bp, reg_offset);
4224 REG_WR(bp, reg_offset, val);
4232 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4238 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4246 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4252 if (!CHIP_IS_E1x(bp)) {
4253 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4260 int port = BP_PORT(bp);
4266 val = REG_RD(bp, reg_offset);
4268 REG_WR(bp, reg_offset, val);
4276 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4283 int func = BP_FUNC(bp);
4285 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4286 bnx2x_read_mf_cfg(bp);
4287 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4288 func_mf_config[BP_ABS_FUNC(bp)].config);
4289 val = SHMEM_RD(bp,
4290 func_mb[BP_FW_MB_IDX(bp)].drv_status);
4294 bnx2x_oem_event(bp,
4299 bnx2x_set_mf_bw(bp);
4302 bnx2x_handle_drv_info_req(bp);
4305 bnx2x_schedule_iov_task(bp,
4308 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4309 bnx2x_pmf_update(bp);
4311 if (bp->port.pmf &&
4313 bp->dcbx_enabled > 0)
4315 bnx2x_dcbx_set_params(bp,
4318 bnx2x_handle_afex_cmd(bp,
4321 bnx2x_handle_eee_event(bp);
4324 bnx2x_schedule_sp_rtnl(bp,
4327 if (bp->link_vars.periodic_flags &
4330 bnx2x_acquire_phy_lock(bp);
4331 bp->link_vars.periodic_flags &=
4333 bnx2x_release_phy_lock(bp);
4334 if (IS_MF(bp))
4335 bnx2x_link_sync_notify(bp);
4336 bnx2x_link_report(bp);
4341 bnx2x__link_status_update(bp);
4345 bnx2x_mc_assert(bp);
4346 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4347 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4348 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4349 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4355 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4356 bnx2x_fw_dump(bp);
4365 val = CHIP_IS_E1(bp) ? 0 :
4366 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4370 val = CHIP_IS_E1(bp) ? 0 :
4371 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4374 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4407 void bnx2x_set_reset_global(struct bnx2x *bp)
4410 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4411 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4412 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4413 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4421 static void bnx2x_clear_reset_global(struct bnx2x *bp)
4424 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4425 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4426 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4427 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4435 static bool bnx2x_reset_is_global(struct bnx2x *bp)
4437 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4448 static void bnx2x_set_reset_done(struct bnx2x *bp)
4451 u32 bit = BP_PATH(bp) ?
4453 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4454 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4458 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4460 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4468 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4471 u32 bit = BP_PATH(bp) ?
4473 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4474 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4478 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4479 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4486 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4488 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4501 void bnx2x_set_pf_load(struct bnx2x *bp)
4504 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4506 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4509 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4510 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4518 val1 |= (1 << bp->pf_num);
4526 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4527 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4533 * @bp: driver handle
4539 bool bnx2x_clear_pf_load(struct bnx2x *bp)
4542 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4544 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4547 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4548 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4555 val1 &= ~(1 << bp->pf_num);
4563 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4564 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4573 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4579 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4591 static void _print_parity(struct bnx2x *bp, u32 reg)
4593 pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4601 static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4619 _print_parity(bp,
4625 _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4629 _print_parity(bp,
4635 _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4639 _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4644 _print_parity(bp,
4646 _print_parity(bp,
4651 _print_parity(bp, GRCBASE_XPB +
4665 static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4683 _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4689 _print_parity(bp, QM_REG_QM_PRTY_STS);
4695 _print_parity(bp, TM_REG_TM_PRTY_STS);
4701 _print_parity(bp,
4708 _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4715 _print_parity(bp,
4717 _print_parity(bp,
4725 _print_parity(bp,
4732 if (CHIP_IS_E1x(bp)) {
4733 _print_parity(bp,
4736 _print_parity(bp,
4738 _print_parity(bp,
4753 _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4759 _print_parity(bp,
4766 _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4773 _print_parity(bp,
4775 _print_parity(bp,
4782 _print_parity(bp, GRCBASE_UPB +
4789 _print_parity(bp,
4796 _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4809 static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4827 _print_parity(bp,
4829 _print_parity(bp,
4834 _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4835 _print_parity(bp,
4837 _print_parity(bp,
4846 _print_parity(bp,
4851 _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4855 _print_parity(bp,
4860 if (CHIP_IS_E1x(bp))
4861 _print_parity(bp,
4864 _print_parity(bp,
4869 _print_parity(bp,
4883 static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4919 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4932 static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4950 _print_parity(bp,
4955 _print_parity(bp,
4968 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4993 netdev_err(bp->dev,
4999 res |= bnx2x_check_blocks_with_parity0(bp,
5001 res |= bnx2x_check_blocks_with_parity1(bp,
5003 res |= bnx2x_check_blocks_with_parity2(bp,
5005 res |= bnx2x_check_blocks_with_parity3(bp,
5007 res |= bnx2x_check_blocks_with_parity4(bp,
5020 * @bp: driver handle
5024 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
5027 int port = BP_PORT(bp);
5029 attn.sig[0] = REG_RD(bp,
5032 attn.sig[1] = REG_RD(bp,
5035 attn.sig[2] = REG_RD(bp,
5038 attn.sig[3] = REG_RD(bp,
5044 attn.sig[3] &= ((REG_RD(bp,
5050 if (!CHIP_IS_E1x(bp))
5051 attn.sig[4] = REG_RD(bp,
5055 return bnx2x_parity_attn(bp, global, print, attn.sig);
5058 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
5063 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5087 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
5111 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
5114 int port = BP_PORT(bp);
5123 bnx2x_acquire_alr(bp);
5125 if (bnx2x_chk_parity_attn(bp, &global, true)) {
5127 bp->recovery_state = BNX2X_RECOVERY_INIT;
5128 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5130 bnx2x_int_disable(bp);
5137 bnx2x_release_alr(bp);
5141 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
5142 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
5143 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
5144 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5145 if (!CHIP_IS_E1x(bp))
5147 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
5156 group_mask = &bp->attn_group[index];
5164 bnx2x_attn_int_deasserted4(bp,
5166 bnx2x_attn_int_deasserted3(bp,
5168 bnx2x_attn_int_deasserted1(bp,
5170 bnx2x_attn_int_deasserted2(bp,
5172 bnx2x_attn_int_deasserted0(bp,
5177 bnx2x_release_alr(bp);
5179 if (bp->common.int_block == INT_BLOCK_HC)
5187 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5188 REG_WR(bp, reg_addr, val);
5190 if (~bp->attn_state & deasserted)
5196 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5197 aeu_mask = REG_RD(bp, reg_addr);
5204 REG_WR(bp, reg_addr, aeu_mask);
5205 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5207 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
5208 bp->attn_state &= ~deasserted;
5209 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
5212 static void bnx2x_attn_int(struct bnx2x *bp)
5215 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
5217 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
5219 u32 attn_state = bp->attn_state;
5234 bnx2x_attn_int_asserted(bp, asserted);
5237 bnx2x_attn_int_deasserted(bp, deasserted);
5240 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
5243 u32 igu_addr = bp->igu_base_addr;
5245 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
5249 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
5252 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
5255 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
5260 if (!bp->cnic_eth_dev.starting_cid ||
5261 (cid < bp->cnic_eth_dev.starting_cid &&
5262 cid != bp->cnic_eth_dev.iscsi_l2_cid))
5271 bnx2x_panic_dump(bp, false);
5273 bnx2x_cnic_cfc_comp(bp, cid, err);
5277 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
5284 rparam.mcast_obj = &bp->mcast_obj;
5286 netif_addr_lock_bh(bp->dev);
5289 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
5292 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5293 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
5299 netif_addr_unlock_bh(bp->dev);
5302 static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5317 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
5318 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5320 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
5325 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
5332 bnx2x_handle_mcast_eqe(bp);
5339 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5347 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5349 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5351 netif_addr_lock_bh(bp->dev);
5353 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5356 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5357 bnx2x_set_storm_rx_mode(bp);
5359 &bp->sp_state))
5360 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5362 &bp->sp_state))
5363 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5365 netif_addr_unlock_bh(bp->dev);
5368 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5375 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5380 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5385 static void bnx2x_after_function_update(struct bnx2x *bp)
5404 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5409 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5413 for_each_eth_queue(bp, q) {
5415 fp = &bp->fp[q];
5416 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5419 rc = bnx2x_queue_state_change(bp, &queue_params);
5425 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5426 fp = &bp->fp[FCOE_IDX(bp)];
5427 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5434 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5438 rc = bnx2x_queue_state_change(bp, &queue_params);
5444 bnx2x_link_report(bp);
5445 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5450 struct bnx2x *bp, u32 cid)
5454 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5455 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5457 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5460 static void bnx2x_eq_int(struct bnx2x *bp)
5469 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5470 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5472 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5483 * specific bp, thus there is no need in "paired" read memory
5486 sw_cons = bp->eq_cons;
5487 sw_prod = bp->eq_prod;
5489 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
5490 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5495 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5497 rc = bnx2x_iov_eq_sp_event(bp, elem);
5509 bnx2x_vf_mbx_schedule(bp,
5516 bp->stats_comp++);
5523 * we may want to verify here that the bp state is
5533 if (CNIC_LOADED(bp) &&
5534 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5537 q_obj = bnx2x_cid_to_q_obj(bp, cid);
5539 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5546 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5547 if (f_obj->complete_cmd(bp, f_obj,
5554 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5555 if (f_obj->complete_cmd(bp, f_obj,
5566 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5574 f_obj->complete_cmd(bp, f_obj,
5581 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5587 f_obj->complete_cmd(bp, f_obj,
5589 bnx2x_after_afex_vif_lists(bp, elem);
5594 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5602 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5610 if (f_obj->complete_cmd(bp, f_obj,
5616 switch (opcode | bp->state) {
5639 bnx2x_handle_classification_eqe(bp, elem);
5649 bnx2x_handle_mcast_eqe(bp);
5659 bnx2x_handle_rx_mode_eqe(bp);
5663 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5664 elem->message.opcode, bp->state);
5671 atomic_add(spqe_cnt, &bp->eq_spq_left);
5673 bp->eq_cons = sw_cons;
5674 bp->eq_prod = sw_prod;
5679 bnx2x_update_eq_prod(bp, bp->eq_prod);
5684 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5690 if (atomic_read(&bp->interrupt_occurred)) {
5693 u16 status = bnx2x_update_dsb_idx(bp);
5697 atomic_set(&bp->interrupt_occurred, 0);
5701 bnx2x_attn_int(bp);
5707 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5709 if (FCOE_INIT(bp) &&
5715 napi_schedule(&bnx2x_fcoe(bp, napi));
5720 bnx2x_eq_int(bp);
5721 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5722 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5733 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5734 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5739 &bp->sp_state)) {
5740 bnx2x_link_report(bp);
5741 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5748 struct bnx2x *bp = netdev_priv(dev);
5750 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5754 if (unlikely(bp->panic))
5758 if (CNIC_LOADED(bp)) {
5762 c_ops = rcu_dereference(bp->cnic_ops);
5764 c_ops->cnic_handler(bp->cnic_data, NULL);
5771 bnx2x_schedule_sp_task(bp);
5778 void bnx2x_drv_pulse(struct bnx2x *bp)
5780 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5781 bp->fw_drv_pulse_wr_seq);
5786 struct bnx2x *bp = from_timer(bp, t, timer);
5788 if (!netif_running(bp->dev))
5791 if (IS_PF(bp) &&
5792 !BP_NOMCP(bp)) {
5793 int mb_idx = BP_FW_MB_IDX(bp);
5797 ++bp->fw_drv_pulse_wr_seq;
5798 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5799 drv_pulse = bp->fw_drv_pulse_wr_seq;
5800 bnx2x_drv_pulse(bp);
5802 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5814 if (bp->state == BNX2X_STATE_OPEN)
5815 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5818 if (IS_VF(bp))
5819 bnx2x_timer_sriov(bp);
5821 mod_timer(&bp->timer, jiffies + bp->current_interval);
5832 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5837 REG_WR(bp, addr + i, fill);
5840 REG_WR8(bp, addr + i, fill);
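
5832-5840: the fill helper uses dword writes when both the address and length are 4-byte aligned, and falls back to byte writes otherwise. Reconstructed:

	static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
	{
		u32 i;

		if (!(len % 4) && !(addr % 4))
			for (i = 0; i < len; i += 4)
				REG_WR(bp, addr + i, fill);
		else
			for (i = 0; i < len; i++)
				REG_WR8(bp, addr + i, fill);
	}
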
5844 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5851 REG_WR(bp, BAR_CSTRORM_INTMEM +
5857 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5865 if (!CHIP_IS_E1x(bp)) {
5879 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5881 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5884 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5890 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5893 int func = BP_FUNC(bp);
5896 REG_WR(bp, BAR_CSTRORM_INTMEM +
5902 static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5904 int func = BP_FUNC(bp);
5911 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5913 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5916 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5959 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5970 if (CHIP_INT_MODE_IS_BC(bp))
5975 bnx2x_zero_fp_sb(bp, fw_sb_id);
5977 if (!CHIP_IS_E1x(bp)) {
5980 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5983 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5995 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5998 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
6016 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
6019 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
6022 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
6024 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6027 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6030 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6035 static void bnx2x_init_def_sb(struct bnx2x *bp)
6037 struct host_sp_status_block *def_sb = bp->def_status_blk;
6038 dma_addr_t mapping = bp->def_status_blk_mapping;
6041 int port = BP_PORT(bp);
6042 int func = BP_FUNC(bp);
6049 if (CHIP_INT_MODE_IS_BC(bp)) {
6053 igu_sp_sb_index = bp->igu_dsb_id;
6062 bp->attn_state = 0;
6072 bp->attn_group[index].sig[sindex] =
6073 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
6075 if (!CHIP_IS_E1x(bp))
6081 bp->attn_group[index].sig[4] = REG_RD(bp,
6084 bp->attn_group[index].sig[4] = 0;
6087 if (bp->common.int_block == INT_BLOCK_HC) {
6091 REG_WR(bp, reg_offset, U64_LO(section));
6092 REG_WR(bp, reg_offset + 4, U64_HI(section));
6093 } else if (!CHIP_IS_E1x(bp)) {
6094 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
6095 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
6101 bnx2x_zero_sp_sb(bp);
6110 sp_sb_data.p_func.vnic_id = BP_VN(bp);
6113 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
6115 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
6118 void bnx2x_update_coalesce(struct bnx2x *bp)
6122 for_each_eth_queue(bp, i)
6123 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
6124 bp->tx_ticks, bp->rx_ticks);
6127 static void bnx2x_init_sp_ring(struct bnx2x *bp)
6129 spin_lock_init(&bp->spq_lock);
6130 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
6132 bp->spq_prod_idx = 0;
6133 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
6134 bp->spq_prod_bd = bp->spq;
6135 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
6138 static void bnx2x_init_eq_ring(struct bnx2x *bp)
6143 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
6146 cpu_to_le32(U64_HI(bp->eq_mapping +
6149 cpu_to_le32(U64_LO(bp->eq_mapping +
6152 bp->eq_cons = 0;
6153 bp->eq_prod = NUM_EQ_DESC;
6154 bp->eq_cons_sb = BNX2X_EQ_INDEX;
6156 atomic_set(&bp->eq_spq_left,
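
The event-queue init above writes the bus address of the following page into the last element of each page, so hardware can walk the ring across non-contiguous pages. A sketch of that next-page chaining under assumed sizes; PAGE_ELEMS, NUM_PAGES and union elem are hypothetical, and the hi/lo split mirrors the U64_HI/U64_LO writes in the fragment.

#include <linux/kernel.h>
#include <linux/dma-mapping.h>

#define PAGE_ELEMS	128	/* hypothetical elements per page */
#define NUM_PAGES	8

union elem {
	struct { __le32 addr_hi, addr_lo; } next_page;	/* link element */
	u8 desc[8];					/* real descriptor */
};

static void chain_pages(union elem *ring, dma_addr_t base)
{
	int i;

	for (i = 0; i < NUM_PAGES; i++) {
		/* the last slot of page i is the link, not a descriptor */
		union elem *link = &ring[PAGE_ELEMS * (i + 1) - 1];
		dma_addr_t next = base + PAGE_ELEMS * sizeof(union elem) *
				  ((i + 1) % NUM_PAGES);	/* wrap to page 0 */

		link->next_page.addr_hi = cpu_to_le32(upper_32_bits(next));
		link->next_page.addr_lo = cpu_to_le32(lower_32_bits(next));
	}
}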
6161 static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
6175 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
6176 ramrod_param.func_id = BP_FUNC(bp);
6178 ramrod_param.pstate = &bp->sp_state;
6181 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
6182 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
6184 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
6192 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
6194 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
6201 static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
6226 if (bp->accept_any_vlan) {
6242 if (bp->accept_any_vlan) {
6262 if (IS_MF_SI(bp))
6280 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
6286 if (!NO_FCOE(bp))
6290 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
6298 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6303 static void bnx2x_init_internal_common(struct bnx2x *bp)
6310 REG_WR(bp, BAR_USTRORM_INTMEM +
6312 if (!CHIP_IS_E1x(bp)) {
6313 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6314 CHIP_INT_MODE_IS_BC(bp) ?
6319 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6324 bnx2x_init_internal_common(bp);
6344 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6349 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6354 if (CHIP_IS_E1x(fp->bp))
6355 return BP_L_ID(fp->bp) + fp->index;
6360 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6362 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6388 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6389 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6390 FP_COS_TO_TXQ(fp, cos, bp),
6396 if (IS_VF(bp))
6399 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6402 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6403 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6404 bnx2x_sp_mapping(bp, q_rdata), q_type);
6413 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6446 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6450 for_each_tx_queue_cnic(bp, i)
6451 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6454 static void bnx2x_init_tx_rings(struct bnx2x *bp)
6459 for_each_eth_queue(bp, i)
6460 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6461 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6464 static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
6466 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
6469 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
6470 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
6472 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
6473 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
6474 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
6475 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
6476 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
6477 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
6483 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
6485 bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
6495 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
6496 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6497 bnx2x_sp_mapping(bp, q_rdata), q_type);
6501 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6505 void bnx2x_nic_init_cnic(struct bnx2x *bp)
6507 if (!NO_FCOE(bp))
6508 bnx2x_init_fcoe_fp(bp);
6510 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6512 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
6516 bnx2x_init_rx_rings_cnic(bp);
6517 bnx2x_init_tx_rings_cnic(bp);
6523 void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6528 for_each_eth_queue(bp, i)
6529 bnx2x_init_eth_fp(bp, i);
6533 bnx2x_init_rx_rings(bp);
6534 bnx2x_init_tx_rings(bp);
6536 if (IS_PF(bp)) {
6538 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6539 bp->common.shmem_base,
6540 bp->common.shmem2_base, BP_PORT(bp));
6543 bnx2x_init_def_sb(bp);
6544 bnx2x_update_dsb_idx(bp);
6545 bnx2x_init_sp_ring(bp);
6547 bnx2x_memset_stats(bp);
6551 void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6553 bnx2x_init_eq_ring(bp);
6554 bnx2x_init_internal(bp, load_code);
6555 bnx2x_pf_init(bp);
6556 bnx2x_stats_init(bp);
6561 bnx2x_int_enable(bp);
6564 bnx2x_attn_int_deasserted0(bp,
6565 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6570 static int bnx2x_gunzip_init(struct bnx2x *bp)
6572 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6573 &bp->gunzip_mapping, GFP_KERNEL);
6574 if (bp->gunzip_buf == NULL)
6577 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6578 if (bp->strm == NULL)
6581 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6582 if (bp->strm->workspace == NULL)
6588 kfree(bp->strm);
6589 bp->strm = NULL;
6592 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6593 bp->gunzip_mapping);
6594 bp->gunzip_buf = NULL;
6601 static void bnx2x_gunzip_end(struct bnx2x *bp)
6603 if (bp->strm) {
6604 vfree(bp->strm->workspace);
6605 kfree(bp->strm);
6606 bp->strm = NULL;
6609 if (bp->gunzip_buf) {
6610 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6611 bp->gunzip_mapping);
6612 bp->gunzip_buf = NULL;
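
The gunzip setup/teardown pair above allocates a coherent DMA buffer plus kernel memory, unwinds partial allocations on failure, and NULLs pointers after freeing so teardown is safe to repeat. A minimal sketch of that pairing; struct ctx and BUF_SZ are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

#define BUF_SZ 0x8000	/* hypothetical; stands in for FW_BUF_SIZE */

struct ctx {
	struct device *dev;
	void *buf;
	dma_addr_t buf_dma;
};

static int ctx_init(struct ctx *c)
{
	c->buf = dma_alloc_coherent(c->dev, BUF_SZ, &c->buf_dma, GFP_KERNEL);
	if (!c->buf)
		return -ENOMEM;
	return 0;
}

static void ctx_end(struct ctx *c)
{
	if (c->buf) {
		dma_free_coherent(c->dev, BUF_SZ, c->buf, c->buf_dma);
		c->buf = NULL;	/* make a second teardown harmless */
	}
}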
6616 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6633 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6634 bp->strm->avail_in = len - n;
6635 bp->strm->next_out = bp->gunzip_buf;
6636 bp->strm->avail_out = FW_BUF_SIZE;
6638 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6642 rc = zlib_inflate(bp->strm, Z_FINISH);
6644 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6645 bp->strm->msg);
6647 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6648 if (bp->gunzip_outlen & 0x3)
6649 netdev_err(bp->dev,
6651 bp->gunzip_outlen);
6652 bp->gunzip_outlen >>= 2;
6654 zlib_inflateEnd(bp->strm);
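
The decompression fragment above is the in-kernel zlib pattern: a preallocated workspace, raw deflate input (negative window bits, no zlib header), a single Z_FINISH pass, then a check that the output length is well formed. A self-contained sketch of that usage, assuming the whole blob fits the output buffer in one pass:

#include <linux/zlib.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

/* Returns bytes produced, or a negative errno. */
static int inflate_blob(const u8 *in, int in_len, u8 *out, int out_len)
{
	struct z_stream_s strm = {};
	int rc;

	strm.workspace = vmalloc(zlib_inflate_workspacesize());
	if (!strm.workspace)
		return -ENOMEM;

	strm.next_in = (typeof(strm.next_in))in;
	strm.avail_in = in_len;
	strm.next_out = out;
	strm.avail_out = out_len;

	rc = zlib_inflateInit2(&strm, -MAX_WBITS);	/* raw deflate stream */
	if (rc == Z_OK)
		rc = zlib_inflate(&strm, Z_FINISH);	/* one-shot inflate */
	zlib_inflateEnd(&strm);
	vfree(strm.workspace);

	return (rc == Z_STREAM_END || rc == Z_OK) ?
		out_len - strm.avail_out : -EINVAL;
}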
6669 static void bnx2x_lb_pckt(struct bnx2x *bp)
6677 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6683 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6690 static int bnx2x_int_mem_test(struct bnx2x *bp)
6696 if (CHIP_REV_IS_FPGA(bp))
6698 else if (CHIP_REV_IS_EMUL(bp))
6704 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6705 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6706 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6707 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6710 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6713 bnx2x_lb_pckt(bp);
6720 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6721 val = *bnx2x_sp(bp, wb_data[0]);
6736 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6749 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6751 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6753 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6754 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6759 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6760 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6761 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6762 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6765 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6769 bnx2x_lb_pckt(bp);
6776 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6777 val = *bnx2x_sp(bp, wb_data[0]);
6790 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6795 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6800 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6806 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6807 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6814 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6816 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6818 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6819 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6820 if (!CNIC_SUPPORT(bp))
6822 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6825 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6826 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6827 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6828 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6835 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6839 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6840 if (!CHIP_IS_E1x(bp))
6841 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6843 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6844 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6845 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6852 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6853 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6854 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6855 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6856 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6857 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6858 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6859 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6860 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6861 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6862 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6863 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6864 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6865 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6866 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6867 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6868 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6869 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6870 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6875 if (!CHIP_IS_E1x(bp))
6878 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6880 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6881 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6882 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6883 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6885 if (!CHIP_IS_E1x(bp))
6887 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6889 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6890 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6891 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6892 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
6895 static void bnx2x_reset_common(struct bnx2x *bp)
6900 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6903 if (CHIP_IS_E3(bp)) {
6908 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6911 static void bnx2x_setup_dmae(struct bnx2x *bp)
6913 bp->dmae_ready = 0;
6914 spin_lock_init(&bp->dmae_lock);
6917 static void bnx2x_init_pxp(struct bnx2x *bp)
6922 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6925 if (bp->mrrs == -1)
6928 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6929 r_order = bp->mrrs;
6932 bnx2x_init_pxp_arb(bp, r_order, w_order);
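
The PXP init above derives its read order from the PCIe Max_Read_Request_Size field in Device Control. A sketch of that decoding: the DEVCTL field encodes MRRS as a power of two (128 << field bytes), and pcie_get_readrq() is the stock helper that returns the decoded byte count.

#include <linux/pci.h>

static int read_order_from_mrrs(struct pci_dev *pdev)
{
	u16 devctl;

	pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &devctl);
	/* field 0 => 128B, 1 => 256B, ... 5 => 4096B */
	return (devctl & PCI_EXP_DEVCTL_READRQ) >> 12;
}

/* equivalently: int bytes = pcie_get_readrq(pdev); */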
6935 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6941 if (BP_NOMCP(bp))
6945 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6960 bp,
6961 bp->common.shmem_base,
6962 bp->common.shmem2_base,
6972 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6975 val = REG_RD(bp, MISC_REG_SPIO_INT);
6977 REG_WR(bp, MISC_REG_SPIO_INT, val);
6980 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6982 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6985 void bnx2x_pf_disable(struct bnx2x *bp)
6987 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6990 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6991 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6992 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6995 static void bnx2x__common_init_phy(struct bnx2x *bp)
6999 if (SHMEM2_RD(bp, size) >
7000 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
7002 shmem_base[0] = bp->common.shmem_base;
7003 shmem2_base[0] = bp->common.shmem2_base;
7004 if (!CHIP_IS_E1x(bp)) {
7006 SHMEM2_RD(bp, other_shmem_base_addr);
7008 SHMEM2_RD(bp, other_shmem2_base_addr);
7010 bnx2x_acquire_phy_lock(bp);
7011 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
7012 bp->common.chip_id);
7013 bnx2x_release_phy_lock(bp);
7016 static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
7018 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
7019 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
7020 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
7021 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
7022 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
7025 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
7027 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
7028 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
7029 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
7030 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
7033 static void bnx2x_set_endianity(struct bnx2x *bp)
7036 bnx2x_config_endianity(bp, 1);
7038 bnx2x_config_endianity(bp, 0);
7042 static void bnx2x_reset_endianity(struct bnx2x *bp)
7044 bnx2x_config_endianity(bp, 0);
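
The set/reset pair above programs the chip's swap-mode registers one way on big-endian hosts and another on little-endian ones, and the reset path always returns them to the little-endian (0) setting. A sketch of that compile-time selection; struct mydev and hw_set_swap_mode are hypothetical stand-ins for the device handle and register-write helper.

#include <linux/types.h>

struct mydev;					/* hypothetical device handle */
void hw_set_swap_mode(struct mydev *d, u32 on);	/* hypothetical helper */

static void config_swap_mode(struct mydev *d)
{
#ifdef __BIG_ENDIAN
	hw_set_swap_mode(d, 1);	/* big-endian host: enable byte swapping */
#else
	hw_set_swap_mode(d, 0);	/* little-endian host: pass-through */
#endif
}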
7050 * @bp: driver handle
7052 static int bnx2x_init_hw_common(struct bnx2x *bp)
7056 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
7062 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7064 bnx2x_reset_common(bp);
7065 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
7068 if (CHIP_IS_E3(bp)) {
7072 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
7074 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7076 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
7078 if (!CHIP_IS_E1x(bp)) {
7088 for (abs_func_id = BP_PATH(bp);
7090 if (abs_func_id == BP_ABS_FUNC(bp)) {
7091 REG_WR(bp,
7097 bnx2x_pretend_func(bp, abs_func_id);
7099 bnx2x_pf_disable(bp);
7100 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7104 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
7105 if (CHIP_IS_E1(bp)) {
7108 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
7111 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
7112 bnx2x_init_pxp(bp);
7113 bnx2x_set_endianity(bp);
7114 bnx2x_ilt_init_page_size(bp, INITOP_SET);
7116 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
7117 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
7122 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
7127 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
7138 if (!CHIP_IS_E1x(bp)) {
7222 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
7223 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
7224 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7226 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
7227 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
7228 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
7231 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
7232 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
7234 if (!CHIP_IS_E1x(bp)) {
7235 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
7236 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
7237 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
7239 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
7244 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
7253 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
7255 bnx2x_iov_init_dmae(bp);
7258 bp->dmae_ready = 1;
7259 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
7261 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
7263 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
7265 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
7267 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
7269 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
7270 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
7271 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
7272 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
7274 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
7277 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
7280 REG_WR(bp, QM_REG_SOFT_RESET, 1);
7281 REG_WR(bp, QM_REG_SOFT_RESET, 0);
7283 if (CNIC_SUPPORT(bp))
7284 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
7286 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
7288 if (!CHIP_REV_IS_SLOW(bp))
7290 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
7292 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
7294 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
7295 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
7297 if (!CHIP_IS_E1(bp))
7298 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
7300 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
7301 if (IS_MF_AFEX(bp)) {
7305 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
7306 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
7307 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
7308 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
7309 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
7314 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
7315 bp->path_has_ovlan ? 7 : 6);
7319 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
7320 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
7321 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
7322 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
7324 if (!CHIP_IS_E1x(bp)) {
7326 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7329 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7336 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
7337 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
7338 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
7339 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
7342 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7344 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7347 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
7348 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
7349 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
7351 if (!CHIP_IS_E1x(bp)) {
7352 if (IS_MF_AFEX(bp)) {
7356 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
7357 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
7358 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
7359 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
7360 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
7362 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
7363 bp->path_has_ovlan ? 7 : 6);
7367 REG_WR(bp, SRC_REG_SOFT_RST, 1);
7369 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
7371 if (CNIC_SUPPORT(bp)) {
7372 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
7373 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
7374 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
7375 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
7376 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
7377 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
7378 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
7379 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
7380 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
7381 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
7383 REG_WR(bp, SRC_REG_SOFT_RST, 0);
7387 dev_alert(&bp->pdev->dev,
7391 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
7393 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
7395 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
7396 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
7398 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7401 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7403 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7405 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7406 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7408 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7409 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
7412 REG_WR(bp, 0x2814, 0xffffffff);
7413 REG_WR(bp, 0x3820, 0xffffffff);
7415 if (!CHIP_IS_E1x(bp)) {
7416 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7419 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7423 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7429 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7430 if (!CHIP_IS_E1(bp)) {
7432 if (!CHIP_IS_E3(bp))
7433 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
7435 if (CHIP_IS_E1H(bp))
7437 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
7439 if (CHIP_REV_IS_SLOW(bp))
7443 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7448 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7453 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7458 REG_WR(bp, CFC_REG_DEBUG0, 0);
7460 if (CHIP_IS_E1(bp)) {
7463 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7464 val = *bnx2x_sp(bp, wb_data[0]);
7467 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7473 bnx2x_setup_fan_failure_detection(bp);
7476 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7478 bnx2x_enable_blocks_attention(bp);
7479 bnx2x_enable_blocks_parity(bp);
7481 if (!BP_NOMCP(bp)) {
7482 if (CHIP_IS_E1x(bp))
7483 bnx2x__common_init_phy(bp);
7487 if (SHMEM2_HAS(bp, netproc_fw_ver))
7488 SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
7496 * @bp: driver handle
7498 static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7500 int rc = bnx2x_init_hw_common(bp);
7506 if (!BP_NOMCP(bp))
7507 bnx2x__common_init_phy(bp);
7512 static int bnx2x_init_hw_port(struct bnx2x *bp)
7514 int port = BP_PORT(bp);
7521 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7523 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7524 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7525 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7532 if (!CHIP_IS_E1x(bp))
7533 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7535 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7536 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7537 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7538 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7540 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7541 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7542 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7543 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7546 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7548 if (CNIC_SUPPORT(bp)) {
7549 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7550 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7551 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7554 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7556 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7558 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
7560 if (IS_MF(bp))
7561 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7562 else if (bp->dev->mtu > 4096) {
7563 if (bp->flags & ONE_PORT_FLAG)
7566 val = bp->dev->mtu;
7572 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7574 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7575 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
7578 if (CHIP_MODE_IS_4_PORT(bp))
7579 REG_WR(bp, (BP_PORT(bp) ?
7583 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7584 if (CHIP_IS_E3B0(bp)) {
7585 if (IS_MF_AFEX(bp)) {
7587 REG_WR(bp, BP_PORT(bp) ?
7590 REG_WR(bp, BP_PORT(bp) ?
7593 REG_WR(bp, BP_PORT(bp) ?
7601 REG_WR(bp, BP_PORT(bp) ?
7604 (bp->path_has_ovlan ? 7 : 6));
7608 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7609 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7610 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7611 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7613 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7614 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7615 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7616 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7618 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7619 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7621 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7623 if (CHIP_IS_E1x(bp)) {
7625 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7628 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7630 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
7633 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7635 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7638 if (CNIC_SUPPORT(bp))
7639 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7641 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7642 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7644 if (CHIP_IS_E1(bp)) {
7645 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7646 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7648 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7650 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7652 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7657 val = IS_MF(bp) ? 0xF7 : 0x7;
7659 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7660 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7664 REG_WR(bp, reg,
7665 REG_RD(bp, reg) &
7669 REG_WR(bp, reg,
7670 REG_RD(bp, reg) &
7673 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7675 if (!CHIP_IS_E1x(bp)) {
7679 if (IS_MF_AFEX(bp))
7680 REG_WR(bp, BP_PORT(bp) ?
7684 REG_WR(bp, BP_PORT(bp) ?
7687 IS_MF_SD(bp) ? 7 : 6);
7689 if (CHIP_IS_E3(bp))
7690 REG_WR(bp, BP_PORT(bp) ?
7692 NIG_REG_LLH_MF_MODE, IS_MF(bp));
7694 if (!CHIP_IS_E3(bp))
7695 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7697 if (!CHIP_IS_E1(bp)) {
7699 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7700 (IS_MF_SD(bp) ? 0x1 : 0x2));
7702 if (!CHIP_IS_E1x(bp)) {
7704 switch (bp->mf_mode) {
7714 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7718 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7719 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7720 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7725 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7729 val = REG_RD(bp, reg_addr);
7731 REG_WR(bp, reg_addr, val);
7734 if (CHIP_IS_E3B0(bp))
7735 bp->flags |= PTP_SUPPORTED;
7740 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7745 if (CHIP_IS_E1(bp))
7752 REG_WR_DMAE(bp, reg, wb_write, 2);
7755 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7766 if (CHIP_INT_MODE_IS_BC(bp))
7780 REG_WR(bp, igu_addr_data, data);
7784 REG_WR(bp, igu_addr_ctl, ctl);
7788 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7791 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7798 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7800 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
7803 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7807 bnx2x_ilt_wr(bp, i, 0);
7810 static void bnx2x_init_searcher(struct bnx2x *bp)
7812 int port = BP_PORT(bp);
7813 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7815 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7818 static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7829 func_params.f_obj = &bp->func_obj;
7839 rc = bnx2x_func_state_change(bp, &func_params);
7844 static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7846 int rc, i, port = BP_PORT(bp);
7850 if (bp->mf_mode == SINGLE_FUNCTION) {
7851 bnx2x_set_rx_filter(&bp->link_params, 0);
7853 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7855 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7858 mac_en[i] = REG_RD(bp, port ?
7863 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7870 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7878 rc = bnx2x_func_switch_update(bp, 1);
7885 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7888 if (bp->mf_mode == SINGLE_FUNCTION) {
7889 bnx2x_set_rx_filter(&bp->link_params, 1);
7891 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7894 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7902 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7906 rc = bnx2x_func_switch_update(bp, 0);
7916 int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7920 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7922 if (CONFIGURE_NIC_MODE(bp)) {
7924 bnx2x_init_searcher(bp);
7927 rc = bnx2x_reset_nic_mode(bp);
7943 static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
7945 if (!CHIP_IS_E1x(bp))
7946 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
7947 1 << BP_ABS_FUNC(bp));
7950 static int bnx2x_init_hw_func(struct bnx2x *bp)
7952 int port = BP_PORT(bp);
7953 int func = BP_FUNC(bp);
7955 struct bnx2x_ilt *ilt = BP_ILT(bp);
7964 if (!CHIP_IS_E1x(bp)) {
7965 rc = bnx2x_pf_flr_clnup(bp);
7967 bnx2x_fw_dump(bp);
7973 if (bp->common.int_block == INT_BLOCK_HC) {
7975 val = REG_RD(bp, addr);
7977 REG_WR(bp, addr, val);
7980 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7981 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7983 ilt = BP_ILT(bp);
7986 if (IS_SRIOV(bp))
7988 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7994 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7995 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7997 bp->context[i].cxt_mapping;
7998 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
8001 bnx2x_ilt_init_op(bp, INITOP_SET);
8003 if (!CONFIGURE_NIC_MODE(bp)) {
8004 bnx2x_init_searcher(bp);
8005 REG_WR(bp, PRS_REG_NIC_MODE, 0);
8009 REG_WR(bp, PRS_REG_NIC_MODE, 1);
8013 if (!CHIP_IS_E1x(bp)) {
8019 if (!(bp->flags & USING_MSIX_FLAG))
8033 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
8035 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
8038 bp->dmae_ready = 1;
8040 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
8042 bnx2x_clean_pglue_errors(bp);
8044 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
8045 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
8046 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
8047 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
8048 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
8049 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
8050 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
8051 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
8052 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
8053 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
8054 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
8055 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
8056 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
8058 if (!CHIP_IS_E1x(bp))
8059 REG_WR(bp, QM_REG_PF_EN, 1);
8061 if (!CHIP_IS_E1x(bp)) {
8062 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8063 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8064 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8065 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8067 bnx2x_init_block(bp, BLOCK_QM, init_phase);
8069 bnx2x_init_block(bp, BLOCK_TM, init_phase);
8070 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
8071 REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
8073 bnx2x_iov_init_dq(bp);
8075 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
8076 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
8077 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
8078 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
8079 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
8080 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
8081 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
8082 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
8083 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
8084 if (!CHIP_IS_E1x(bp))
8085 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
8087 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
8089 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
8091 if (!CHIP_IS_E1x(bp))
8092 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
8094 if (IS_MF(bp)) {
8095 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
8096 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
8097 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
8098 bp->mf_ov);
8102 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
8105 if (bp->common.int_block == INT_BLOCK_HC) {
8106 if (CHIP_IS_E1H(bp)) {
8107 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8109 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8110 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8112 bnx2x_init_block(bp, BLOCK_HC, init_phase);
8117 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8119 if (!CHIP_IS_E1x(bp)) {
8120 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8121 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8124 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
8126 if (!CHIP_IS_E1x(bp)) {
8149 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8151 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
8152 prod_offset = (bp->igu_base_sb + sb_idx) *
8158 REG_WR(bp, addr, 0);
8161 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
8163 bnx2x_igu_clear_sb(bp,
8164 bp->igu_base_sb + sb_idx);
8168 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8171 if (CHIP_MODE_IS_4_PORT(bp))
8172 dsb_idx = BP_FUNC(bp);
8174 dsb_idx = BP_VN(bp);
8176 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
8188 REG_WR(bp, addr, 0);
8191 if (CHIP_INT_MODE_IS_BC(bp)) {
8192 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8194 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8196 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8198 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8200 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8203 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8205 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8208 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
8212 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
8213 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
8214 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
8215 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
8216 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
8217 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
8222 REG_WR(bp, 0x2114, 0xffffffff);
8223 REG_WR(bp, 0x2120, 0xffffffff);
8225 if (CHIP_IS_E1x(bp)) {
8228 BP_PORT(bp) * (main_mem_size * 4);
8232 val = REG_RD(bp, main_mem_prty_clr);
8242 bnx2x_read_dmae(bp, i, main_mem_width / 4);
8243 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
8247 REG_RD(bp, main_mem_prty_clr);
8252 REG_WR8(bp, BAR_USTRORM_INTMEM +
8253 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8254 REG_WR8(bp, BAR_TSTRORM_INTMEM +
8255 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8256 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8257 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8258 REG_WR8(bp, BAR_XSTRORM_INTMEM +
8259 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8262 bnx2x_phy_probe(&bp->link_params);
8267 void bnx2x_free_mem_cnic(struct bnx2x *bp)
8269 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
8271 if (!CHIP_IS_E1x(bp))
8272 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
8275 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
8278 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8281 void bnx2x_free_mem(struct bnx2x *bp)
8285 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
8286 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
8288 if (IS_VF(bp))
8291 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
8294 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
8297 for (i = 0; i < L2_ILT_LINES(bp); i++)
8298 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
8299 bp->context[i].size);
8300 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
8302 BNX2X_FREE(bp->ilt->lines);
8304 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
8306 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
8309 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8311 bnx2x_iov_free_mem(bp);
8314 int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
8316 if (!CHIP_IS_E1x(bp)) {
8318 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8320 if (!bp->cnic_sb.e2_sb)
8323 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8325 if (!bp->cnic_sb.e1x_sb)
8329 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8331 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8332 if (!bp->t2)
8337 bp->cnic_eth_dev.addr_drv_info_to_mcp =
8338 &bp->slowpath->drv_info_to_mcp;
8340 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
8346 bnx2x_free_mem_cnic(bp);
8351 int bnx2x_alloc_mem(struct bnx2x *bp)
8355 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8357 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8358 if (!bp->t2)
8362 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
8364 if (!bp->def_status_blk)
8367 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
8369 if (!bp->slowpath)
8385 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
8388 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8390 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
8391 bp->context[i].size);
8392 if (!bp->context[i].vcxt)
8394 allocated += bp->context[i].size;
8396 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
8398 if (!bp->ilt->lines)
8401 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8404 if (bnx2x_iov_alloc_mem(bp))
8408 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
8409 if (!bp->spq)
8413 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
8415 if (!bp->eq_ring)
8421 bnx2x_free_mem(bp);
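
The alloc/free pair above follows the single-label unwind convention: every failed allocation jumps to one error label, and the shared teardown frees whatever subset actually succeeded. A minimal sketch of that pattern; struct res and the sizes are hypothetical, and it relies on kfree(NULL) being a no-op.

#include <linux/slab.h>
#include <linux/errno.h>

#define A_SZ 256	/* hypothetical sizes */
#define B_SZ 512

struct res {
	void *a, *b;
};

static void free_all(struct res *r)
{
	kfree(r->b);	/* kfree(NULL) is a no-op */
	kfree(r->a);
	r->a = r->b = NULL;
}

static int alloc_all(struct res *r)
{
	r->a = kzalloc(A_SZ, GFP_KERNEL);
	if (!r->a)
		goto err;
	r->b = kzalloc(B_SZ, GFP_KERNEL);
	if (!r->b)
		goto err;
	return 0;

err:
	free_all(r);	/* frees only the subset that succeeded */
	return -ENOMEM;
}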
8430 int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac,
8456 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8468 int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8492 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8505 void bnx2x_clear_vlan_info(struct bnx2x *bp)
8510 list_for_each_entry(vlan, &bp->vlan_reg, link)
8513 bp->vlan_cnt = 0;
8516 static int bnx2x_del_all_vlans(struct bnx2x *bp)
8518 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
8524 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
8528 bnx2x_clear_vlan_info(bp);
8533 int bnx2x_del_all_macs(struct bnx2x *bp,
8547 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8554 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8556 if (IS_PF(bp)) {
8561 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8562 &bp->sp_objs->mac_obj, set,
8565 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8566 bp->fp->index, set);
8570 int bnx2x_setup_leading(struct bnx2x *bp)
8572 if (IS_PF(bp))
8573 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8575 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8581 * @bp: driver handle
8585 int bnx2x_set_int_mode(struct bnx2x *bp)
8589 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
8597 rc = bnx2x_enable_msix(bp);
8604 if (rc && IS_VF(bp))
8609 bp->num_queues,
8610 1 + bp->num_cnic_queues);
8614 bnx2x_enable_msi(bp);
8618 bp->num_ethernet_queues = 1;
8619 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
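
The interrupt-mode fragment above tries MSI-X, falls back to MSI, and finally drops to a single queue. A sketch of the same fallback using the modern PCI helper, which picks the best supported vector type itself; sizing the queue set to the granted count mirrors the single-queue fallback in the fragment.

#include <linux/pci.h>

static int setup_irqs(struct pci_dev *pdev, int nvec)
{
	int got = pci_alloc_irq_vectors(pdev, 1, nvec,
					PCI_IRQ_MSIX | PCI_IRQ_MSI |
					PCI_IRQ_LEGACY);
	if (got < 0)
		return got;	/* no usable interrupt mechanism */

	/* got may be less than nvec; the caller should size its queue
	 * set to match, as the driver does when falling back. */
	return got;
}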
8630 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8632 if (IS_SRIOV(bp))
8634 return L2_ILT_LINES(bp);
8637 void bnx2x_ilt_set_info(struct bnx2x *bp)
8640 struct bnx2x_ilt *ilt = BP_ILT(bp);
8643 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8652 line += bnx2x_cid_ilt_lines(bp);
8654 if (CNIC_SUPPORT(bp))
8666 if (QM_INIT(bp->qm_cid_count)) {
8674 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8688 if (CNIC_SUPPORT(bp)) {
8730 * @bp: driver handle
8738 static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8756 init_params->rx.hc_rate = bp->rx_ticks ?
8757 (1000000 / bp->rx_ticks) : 0;
8758 init_params->tx.hc_rate = bp->tx_ticks ?
8759 (1000000 / bp->tx_ticks) : 0;
8785 &bp->context[cxt_index].vcxt[cxt_offset].eth;
8789 static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8800 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8806 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8809 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8818 return bnx2x_queue_state_change(bp, q_params);
8824 * @bp: driver handle
8832 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8847 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8850 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8855 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8861 rc = bnx2x_queue_state_change(bp, &q_params);
8873 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8876 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8879 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8882 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8889 bp->fcoe_init = true;
8892 rc = bnx2x_queue_state_change(bp, &q_params);
8904 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8916 static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8918 struct bnx2x_fastpath *fp = &bp->fp[index];
8925 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8946 rc = bnx2x_queue_state_change(bp, &q_params);
8955 rc = bnx2x_queue_state_change(bp, &q_params);
8962 rc = bnx2x_queue_state_change(bp, &q_params);
8971 rc = bnx2x_queue_state_change(bp, &q_params);
8979 return bnx2x_queue_state_change(bp, &q_params);
8982 static void bnx2x_reset_func(struct bnx2x *bp)
8984 int port = BP_PORT(bp);
8985 int func = BP_FUNC(bp);
8989 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8990 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8991 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8992 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8995 for_each_eth_queue(bp, i) {
8996 struct bnx2x_fastpath *fp = &bp->fp[i];
8997 REG_WR8(bp, BAR_CSTRORM_INTMEM +
9002 if (CNIC_LOADED(bp))
9004 REG_WR8(bp, BAR_CSTRORM_INTMEM +
9006 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
9009 REG_WR8(bp, BAR_CSTRORM_INTMEM +
9014 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
9018 if (bp->common.int_block == INT_BLOCK_HC) {
9019 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
9020 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
9022 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
9023 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
9026 if (CNIC_LOADED(bp)) {
9028 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
9035 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
9040 bnx2x_clear_func_ilt(bp, func);
9045 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
9053 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
9057 if (!CHIP_IS_E1x(bp))
9058 bnx2x_pf_disable(bp);
9060 bp->dmae_ready = 0;
9063 static void bnx2x_reset_port(struct bnx2x *bp)
9065 int port = BP_PORT(bp);
9069 bnx2x__link_reset(bp);
9071 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
9074 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
9076 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
9080 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
9084 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
9092 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
9099 func_params.f_obj = &bp->func_obj;
9104 return bnx2x_func_state_change(bp, &func_params);
9107 static int bnx2x_func_stop(struct bnx2x *bp)
9114 func_params.f_obj = &bp->func_obj;
9123 rc = bnx2x_func_state_change(bp, &func_params);
9130 return bnx2x_func_state_change(bp, &func_params);
9140 * @bp: driver handle
9145 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
9148 int port = BP_PORT(bp);
9154 else if (bp->flags & NO_WOL_FLAG)
9157 else if (bp->wol) {
9159 const u8 *mac_addr = bp->dev->dev_addr;
9160 struct pci_dev *pdev = bp->pdev;
9167 u8 entry = (BP_VN(bp) + 1)*8;
9170 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
9174 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
9187 if (!BP_NOMCP(bp))
9188 reset_code = bnx2x_fw_command(bp, reset_code, 0);
9190 int path = BP_PATH(bp);
9214 * @bp: driver handle
9217 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
9222 if (!BP_NOMCP(bp))
9223 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
9226 static int bnx2x_func_wait_started(struct bnx2x *bp)
9229 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
9231 if (!bp->port.pmf)
9250 synchronize_irq(bp->msix_table[0].vector);
9252 synchronize_irq(bp->pdev->irq);
9257 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
9261 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
9276 func_params.f_obj = &bp->func_obj;
9282 bnx2x_func_state_change(bp, &func_params);
9286 return bnx2x_func_state_change(bp, &func_params);
9293 static void bnx2x_disable_ptp(struct bnx2x *bp)
9295 int port = BP_PORT(bp);
9298 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
9302 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
9304 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
9306 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
9308 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
9312 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
9317 static void bnx2x_stop_ptp(struct bnx2x *bp)
9322 cancel_work_sync(&bp->ptp_task);
9324 if (bp->ptp_tx_skb) {
9325 dev_kfree_skb_any(bp->ptp_tx_skb);
9326 bp->ptp_tx_skb = NULL;
9330 bnx2x_disable_ptp(bp);
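
The PTP stop fragment above quiesces in a deliberate order: cancel the deferred worker first, so it can no longer requeue or touch the timestamp skb, then release any skb still in flight. A minimal sketch of that order; struct ptp_state is a hypothetical stand-in with the same two fields.

#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct ptp_state {
	struct work_struct task;
	struct sk_buff *tx_skb;
};

static void ptp_stop(struct ptp_state *p)
{
	cancel_work_sync(&p->task);	/* worker cannot run after this */
	if (p->tx_skb) {
		dev_kfree_skb_any(p->tx_skb);	/* safe in any context */
		p->tx_skb = NULL;
	}
}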
9335 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9337 int port = BP_PORT(bp);
9344 for_each_tx_queue(bp, i) {
9345 struct bnx2x_fastpath *fp = &bp->fp[i];
9348 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
9359 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9365 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9375 if (!CHIP_IS_E1x(bp)) {
9377 rc = bnx2x_del_all_vlans(bp);
9383 if (!CHIP_IS_E1(bp))
9384 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9390 netif_addr_lock_bh(bp->dev);
9392 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9393 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9394 else if (bp->slowpath)
9395 bnx2x_set_storm_rx_mode(bp);
9398 rparam.mcast_obj = &bp->mcast_obj;
9399 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9403 netif_addr_unlock_bh(bp->dev);
9405 bnx2x_iov_chip_cleanup(bp);
9412 reset_code = bnx2x_send_unload_req(bp, unload_mode);
9418 rc = bnx2x_func_wait_started(bp);
9429 for_each_eth_queue(bp, i)
9430 if (bnx2x_stop_queue(bp, i))
9437 if (CNIC_LOADED(bp)) {
9438 for_each_cnic_queue(bp, i)
9439 if (bnx2x_stop_queue(bp, i))
9450 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9456 rc = bnx2x_func_stop(bp);
9469 if (bp->flags & PTP_SUPPORTED) {
9470 bnx2x_stop_ptp(bp);
9471 if (bp->ptp_clock) {
9472 ptp_clock_unregister(bp->ptp_clock);
9473 bp->ptp_clock = NULL;
9477 if (!bp->nic_stopped) {
9479 bnx2x_netif_stop(bp, 1);
9481 bnx2x_del_all_napi(bp);
9482 if (CNIC_LOADED(bp))
9483 bnx2x_del_all_napi_cnic(bp);
9486 bnx2x_free_irq(bp);
9487 bp->nic_stopped = true;
9495 if (!pci_channel_offline(bp->pdev)) {
9496 rc = bnx2x_reset_hw(bp, reset_code);
9502 bnx2x_send_unload_done(bp, keep_link);
9505 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9511 if (CHIP_IS_E1(bp)) {
9512 int port = BP_PORT(bp);
9516 val = REG_RD(bp, addr);
9518 REG_WR(bp, addr, val);
9520 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9523 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9528 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9533 if (!CHIP_IS_E1(bp)) {
9535 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
9537 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
9541 if (CHIP_IS_E1x(bp)) {
9543 val = REG_RD(bp, HC_REG_CONFIG_1);
9544 REG_WR(bp, HC_REG_CONFIG_1,
9548 val = REG_RD(bp, HC_REG_CONFIG_0);
9549 REG_WR(bp, HC_REG_CONFIG_0,
9554 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9556 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9568 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9571 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9573 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9579 * @bp: driver handle
9582 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9585 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9586 MF_CFG_WR(bp, shared_mf_config.clp_mb,
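
As I read the prep/done pair above, the magic bit's prior state is saved, the bit is forced on for the duration of the MCP reset, and the saved state is merged back afterwards. A sketch of that save/force/restore pattern; CLP_MAGIC is a hypothetical stand-in for SHARED_MF_CLP_MAGIC, and the shared word is modeled as a plain pointer.

#include <linux/types.h>

#define CLP_MAGIC 0x80000000	/* hypothetical marker bit */

static void clp_prep(u32 *word, u32 *saved_bit)
{
	*saved_bit = *word & CLP_MAGIC;	/* remember prior state of the bit */
	*word |= CLP_MAGIC;		/* force it on across the reset */
}

static void clp_done(u32 *word, u32 saved_bit)
{
	*word = (*word & ~CLP_MAGIC) | saved_bit;	/* restore prior state */
}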
9593 * @bp: driver handle
9598 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9606 if (!CHIP_IS_E1(bp))
9607 bnx2x_clp_reset_prep(bp, magic_val);
9610 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9612 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9616 REG_WR(bp, shmem + validity_offset, 0);
9625 * @bp: driver handle
9627 static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9631 if (CHIP_REV_IS_SLOW(bp))
9638 * initializes bp->common.shmem_base and waits for validity signature to appear
9640 static int bnx2x_init_shmem(struct bnx2x *bp)
9646 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9651 if (bp->common.shmem_base == 0xFFFFFFFF) {
9652 bp->flags |= NO_MCP_FLAG;
9656 if (bp->common.shmem_base) {
9657 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9662 bnx2x_mcp_wait_one(bp);
9671 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9673 int rc = bnx2x_init_shmem(bp);
9676 if (!CHIP_IS_E1(bp))
9677 bnx2x_clp_reset_done(bp, magic_val);
9682 static void bnx2x_pxp_prep(struct bnx2x *bp)
9684 if (!CHIP_IS_E1(bp)) {
9685 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9686 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9700 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9752 if (CHIP_IS_E1(bp))
9754 else if (CHIP_IS_E1H(bp))
9756 else if (CHIP_IS_E2(bp))
9779 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9782 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9787 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9792 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9799 * @bp: driver handle
9804 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9810 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9827 static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9836 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9837 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9838 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9839 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9840 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9841 if (CHIP_IS_E3(bp))
9842 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9848 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9864 bnx2x_set_234_gates(bp, true);
9867 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9873 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9884 bnx2x_reset_mcp_prep(bp, &val);
9887 bnx2x_pxp_prep(bp);
9891 bnx2x_process_kill_chip_reset(bp, global);
9895 if (!CHIP_IS_E1x(bp))
9896 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9900 if (global && bnx2x_reset_mcp_comp(bp, val))
9906 bnx2x_set_234_gates(bp, false);
9914 static int bnx2x_leader_reset(struct bnx2x *bp)
9917 bool global = bnx2x_reset_is_global(bp);
9923 if (!global && !BP_NOMCP(bp)) {
9924 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9937 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9946 if (bnx2x_process_kill(bp, global)) {
9948 BP_PATH(bp));
9957 bnx2x_set_reset_done(bp);
9959 bnx2x_clear_reset_global(bp);
9963 if (!global && !BP_NOMCP(bp)) {
9964 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9965 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9968 bp->is_leader = 0;
9969 bnx2x_release_leader_lock(bp);
9974 static void bnx2x_recovery_failed(struct bnx2x *bp)
9976 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9979 netif_device_detach(bp->dev);
9985 bnx2x_set_reset_in_progress(bp);
9988 bnx2x_set_power_state(bp, PCI_D3hot);
9990 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9998 * will never be called when netif_running(bp->dev) is false.
10000 static void bnx2x_parity_recover(struct bnx2x *bp)
10007 for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
10008 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
10016 switch (bp->recovery_state) {
10019 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
10023 if (bnx2x_trylock_leader_lock(bp)) {
10024 bnx2x_set_reset_in_progress(bp);
10032 bnx2x_set_reset_global(bp);
10034 bp->is_leader = 1;
10039 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
10042 bp->recovery_state = BNX2X_RECOVERY_WAIT;
10053 if (bp->is_leader) {
10054 int other_engine = BP_PATH(bp) ? 0 : 1;
10056 bnx2x_get_load_status(bp, other_engine);
10058 bnx2x_get_load_status(bp, BP_PATH(bp));
10059 global = bnx2x_reset_is_global(bp);
10074 schedule_delayed_work(&bp->sp_rtnl_task,
10083 if (bnx2x_leader_reset(bp)) {
10084 bnx2x_recovery_failed(bp);
10096 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
10103 if (bnx2x_trylock_leader_lock(bp)) {
10107 bp->is_leader = 1;
10111 schedule_delayed_work(&bp->sp_rtnl_task,
10120 if (bnx2x_reset_is_global(bp)) {
10122 &bp->sp_rtnl_task,
10128 bp->eth_stats.recoverable_error;
10130 bp->eth_stats.unrecoverable_error;
10131 bp->recovery_state =
10133 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
10135 netdev_err(bp->dev,
10138 netif_device_detach(bp->dev);
10141 bp, PCI_D3hot);
10144 bp->recovery_state =
10149 bp->eth_stats.recoverable_error =
10151 bp->eth_stats.unrecoverable_error =
10163 static int bnx2x_udp_port_update(struct bnx2x *bp)
10176 func_params.f_obj = &bp->func_obj;
10183 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) {
10184 geneve_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
10188 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) {
10189 vxlan_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
10197 rc = bnx2x_func_state_change(bp, &func_params);
10211 struct bnx2x *bp = netdev_priv(netdev);
10215 bp->udp_tunnel_ports[table] = be16_to_cpu(ti.port);
10217 return bnx2x_udp_port_update(bp);
10237 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
10241 if (!netif_running(bp->dev)) {
10246 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
10256 bp->sp_rtnl_state = 0;
10259 bnx2x_parity_recover(bp);
10265 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
10276 bp->sp_rtnl_state = 0;
10280 bp->link_vars.link_up = 0;
10281 bp->force_link_down = true;
10282 netif_carrier_off(bp->dev);
10285 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10290 if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) {
10291 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10292 if (bnx2x_nic_load(bp, LOAD_NORMAL))
10301 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
10302 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
10303 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
10304 bnx2x_after_function_update(bp);
10310 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
10312 netif_device_detach(bp->dev);
10313 bnx2x_close(bp->dev);
10318 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
10321 bnx2x_vfpf_set_mcast(bp->dev);
10324 &bp->sp_rtnl_state)){
10325 if (netif_carrier_ok(bp->dev)) {
10326 bnx2x_tx_disable(bp);
10331 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
10333 bnx2x_set_rx_mode_inner(bp);
10337 &bp->sp_rtnl_state))
10338 bnx2x_pf_set_vfs_vlan(bp);
10340 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
10341 bnx2x_dcbx_stop_hw_tx(bp);
10342 bnx2x_dcbx_resume_hw_tx(bp);
10346 &bp->sp_rtnl_state))
10347 bnx2x_update_mng_version(bp);
10349 if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
10350 bnx2x_handle_update_svid_cmd(bp);
10358 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
10359 &bp->sp_rtnl_state)) {
10360 bnx2x_disable_sriov(bp);
10361 bnx2x_enable_sriov(bp);
10367 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
10369 if (!netif_running(bp->dev))
10372 if (CHIP_REV_IS_SLOW(bp)) {
10377 bnx2x_acquire_phy_lock(bp);
10380 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
10384 if (bp->port.pmf) {
10385 bnx2x_period_func(&bp->link_params, &bp->link_vars);
10388 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
10391 bnx2x_release_phy_lock(bp);
10400 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10404 return base + (BP_ABS_FUNC(bp)) * stride;
10407 static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10420 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10421 REG_WR(bp, vals->umac_addr[port], 0);
10426 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10431 u8 port = BP_PORT(bp);
10436 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10438 if (!CHIP_IS_E3(bp)) {
10439 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
10444 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
10446 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
10455 wb_data[0] = REG_RD(bp, base_addr + offset);
10456 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
10461 REG_WR(bp, vals->bmac_addr, wb_data[0]);
10462 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
10465 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
10466 vals->emac_val = REG_RD(bp, vals->emac_addr);
10467 REG_WR(bp, vals->emac_addr, 0);
10472 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
10473 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
10474 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10476 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10479 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
10480 REG_WR(bp, vals->xmac_addr, 0);
10484 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10486 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10505 static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10510 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10514 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10522 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
10527 if (BP_FUNC(bp) < 2)
10528 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
10530 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
10532 tmp_reg = REG_RD(bp, addr);
10537 REG_WR(bp, addr, tmp_reg);
10540 BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
10543 static int bnx2x_prev_mcp_done(struct bnx2x *bp)
10545 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
10556 bnx2x_prev_path_get_entry(struct bnx2x *bp)
10561 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10562 bp->pdev->bus->number == tmp_list->bus &&
10563 BP_PATH(bp) == tmp_list->path)
10569 static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10580 tmp_list = bnx2x_prev_path_get_entry(bp);
10586 BP_PATH(bp));
10594 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10602 tmp_list = bnx2x_prev_path_get_entry(bp);
10606 BP_PATH(bp));
10610 BP_PATH(bp));
10619 bool bnx2x_port_after_undi(struct bnx2x *bp)
10626 entry = bnx2x_prev_path_get_entry(bp);
10627 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10634 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10646 tmp_list = bnx2x_prev_path_get_entry(bp);
10652 BP_PATH(bp));
10667 tmp_list->bus = bp->pdev->bus->number;
10668 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10669 tmp_list->path = BP_PATH(bp);
10671 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10679 BP_PATH(bp));
10687 static int bnx2x_do_flr(struct bnx2x *bp)
10689 struct pci_dev *dev = bp->pdev;
10691 if (CHIP_IS_E1x(bp)) {
10697 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10699 bp->common.bc_ver);
10707 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
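
The FLR fragment above gates the request twice before issuing it: older chips cannot do FLR at all, and bootcode below a minimum version cannot coordinate it. A sketch of that guarded request; every name here (struct nic, the accessors, the opcode and version constants) is a hypothetical stand-in for the driver's MCP mailbox machinery.

#include <linux/errno.h>
#include <linux/types.h>

struct nic;				/* hypothetical handle */
u32 nic_fw_ver(struct nic *n);		/* hypothetical accessors */
bool nic_is_old_gen(struct nic *n);
void nic_fw_cmd(struct nic *n, u32 cmd);

#define FW_CMD_FLR	0x0b000000	/* hypothetical mailbox opcode */
#define MIN_FW_FOR_FLR	0x040001	/* hypothetical minimum bootcode */

static int request_flr(struct nic *n)
{
	if (nic_is_old_gen(n))			/* chip has no FLR support */
		return -EINVAL;
	if (nic_fw_ver(n) < MIN_FW_FOR_FLR)	/* bootcode too old */
		return -EINVAL;
	nic_fw_cmd(n, FW_CMD_FLR);		/* ask management FW to FLR */
	return 0;
}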
10712 static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10719 if (bnx2x_prev_is_path_marked(bp))
10720 return bnx2x_prev_mcp_done(bp);
10725 if (bnx2x_prev_is_after_undi(bp))
10732 rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
10737 rc = bnx2x_do_flr(bp);
10750 rc = bnx2x_prev_mcp_done(bp);
10757 static int bnx2x_prev_unload_common(struct bnx2x *bp)
10771 if (bnx2x_prev_is_path_marked(bp))
10772 return bnx2x_prev_mcp_done(bp);
10774 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10781 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10784 bnx2x_set_rx_filter(&bp->link_params, 0);
10785 bp->link_params.port ^= 1;
10786 bnx2x_set_rx_filter(&bp->link_params, 0);
10787 bp->link_params.port ^= 1;
10790 if (bnx2x_prev_is_after_undi(bp)) {
10793 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10795 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10797 if (!CHIP_IS_E1x(bp))
10799 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10802 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10806 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10820 bnx2x_prev_unload_undi_inc(bp, 1);
10830 bnx2x_reset_common(bp);
10833 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10835 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10837 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10839 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10841 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10842 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10845 rc = bnx2x_prev_mark_path(bp, prev_undi);
10847 bnx2x_prev_mcp_done(bp);
10851 return bnx2x_prev_mcp_done(bp);
10854 static int bnx2x_prev_unload(struct bnx2x *bp)
10863 bnx2x_clean_pglue_errors(bp);
10866 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10867 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10868 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10870 hw_lock_val = REG_RD(bp, hw_lock_reg);
10874 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10875 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10879 REG_WR(bp, hw_lock_reg, 0xffffffff);
10883 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10885 bnx2x_release_alr(bp);
10891 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10904 aer = !!(bnx2x_prev_path_get_entry(bp) &&
10905 bnx2x_prev_path_get_entry(bp)->aer);
10910 rc = bnx2x_prev_unload_common(bp);
10915 rc = bnx2x_prev_unload_uncommon(bp);
10928 if (bnx2x_port_after_undi(bp))
10929 bp->link_params.feature_config_flags |=
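The hw_lock_reg selection above is plain address arithmetic: functions 0..5 index MISC_REG_DRIVER_CONTROL_1 in 8-byte strides, while functions 6..7 fall over to a second bank at MISC_REG_DRIVER_CONTROL_7. A sketch of the computation; the base offsets below are placeholders, the real values come from the register header:

	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder bases for illustration; the real offsets live in
	 * the driver's register definitions. */
	#define MISC_REG_DRIVER_CONTROL_1 0x0a510
	#define MISC_REG_DRIVER_CONTROL_7 0x0a3c8

	static uint32_t hw_lock_reg_addr(unsigned int func)
	{
		return (func <= 5) ?
			MISC_REG_DRIVER_CONTROL_1 + func * 8 :
			MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8;
	}

	int main(void)
	{
		for (unsigned int f = 0; f < 8; f++)
			printf("func %u -> 0x%05x\n", f,
			       (unsigned)hw_lock_reg_addr(f));
		return 0;
	}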
10937 static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10944 val = REG_RD(bp, MISC_REG_CHIP_NUM);
10946 val = REG_RD(bp, MISC_REG_CHIP_REV);
10952 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10954 val = REG_RD(bp, MISC_REG_BOND_ID);
10956 bp->common.chip_id = id;
10959 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10960 if (CHIP_IS_57810(bp))
10961 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10962 (bp->common.chip_id & 0x0000FFFF);
10963 else if (CHIP_IS_57810_MF(bp))
10964 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10965 (bp->common.chip_id & 0x0000FFFF);
10966 bp->common.chip_id |= 0x1;
10970 bp->db_size = (1 << BNX2X_DB_SHIFT);
10972 if (!CHIP_IS_E1x(bp)) {
10973 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10975 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10980 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10983 if (CHIP_MODE_IS_4_PORT(bp))
10984 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
10986 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
10988 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
10989 bp->pfid = bp->pf_num; /* 0..7 */
10992 BNX2X_DEV_INFO("pf_id: %x", bp->pfid);
10994 bp->link_params.chip_id = bp->common.chip_id;
10997 val = (REG_RD(bp, 0x2874) & 0x55);
10998 if ((bp->common.chip_id & 0x1) ||
10999 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
11000 bp->flags |= ONE_PORT_FLAG;
11004 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
11005 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
11008 bp->common.flash_size, bp->common.flash_size);
11010 bnx2x_init_shmem(bp);
11012 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
11016 bp->link_params.shmem_base = bp->common.shmem_base;
11017 bp->link_params.shmem2_base = bp->common.shmem2_base;
11018 if (SHMEM2_RD(bp, size) >
11019 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
11020 bp->link_params.lfa_base =
11021 REG_RD(bp, bp->common.shmem2_base +
11023 lfa_host_addr[BP_PORT(bp)]));
11025 bp->link_params.lfa_base = 0;
11027 bp->common.shmem_base, bp->common.shmem2_base);
11029 if (!bp->common.shmem_base) {
11031 bp->flags |= NO_MCP_FLAG;
11035 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
11036 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
11038 bp->link_params.hw_led_mode = ((bp->common.hw_config &
11042 bp->link_params.feature_config_flags = 0;
11043 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
11045 bp->link_params.feature_config_flags |=
11048 bp->link_params.feature_config_flags &=
11051 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
11052 bp->common.bc_ver = val;
11060 bp->link_params.feature_config_flags |=
11064 bp->link_params.feature_config_flags |=
11067 bp->link_params.feature_config_flags |=
11070 bp->link_params.feature_config_flags |=
11074 bp->link_params.feature_config_flags |=
11078 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
11081 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
11084 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
11087 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
11090 boot_mode = SHMEM_RD(bp,
11091 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
11095 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
11098 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
11101 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
11104 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
11108 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
11109 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
11112 (bp->flags & NO_WOL_FLAG) ? "not " : "");
11114 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
11115 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
11116 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
11117 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
11119 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
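One detail of bnx2x_get_common_hwinfo worth pinning down is the PF id mapping, which the inline comments above spell out: 4-port mode halves the PCI function number (0..3), 2-port E2/E3 keeps only its even values (0, 2, 4, 6), and E1x uses it unchanged. A sketch of the three mappings:

	#include <stdio.h>

	enum port_mode { PORT_MODE_E1X, PORT_MODE_2, PORT_MODE_4 };

	static int pfid_of(int pf_num, enum port_mode mode)
	{
		switch (mode) {
		case PORT_MODE_4: return pf_num >> 1;	/* 0..3 */
		case PORT_MODE_2: return pf_num & 0x6;	/* 0, 2, 4, 6 */
		default:          return pf_num;	/* E1x: 0..7 */
		}
	}

	int main(void)
	{
		for (int pf = 0; pf < 8; pf++)
			printf("pf %d: 4-port=%d 2-port=%d e1x=%d\n", pf,
			       pfid_of(pf, PORT_MODE_4),
			       pfid_of(pf, PORT_MODE_2),
			       pfid_of(pf, PORT_MODE_E1X));
		return 0;
	}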
11126 static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
11128 int pfid = BP_FUNC(bp);
11133 bp->igu_base_sb = 0xff;
11134 if (CHIP_INT_MODE_IS_BC(bp)) {
11135 int vn = BP_VN(bp);
11136 igu_sb_cnt = bp->igu_sb_cnt;
11137 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
11140 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
11141 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
11149 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
11158 bp->igu_dsb_id = igu_sb_id;
11160 if (bp->igu_base_sb == 0xff)
11161 bp->igu_base_sb = igu_sb_id;
11174 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
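bnx2x_get_igu_cam_info scans IGU_REG_MAPPING_MEMORY one 32-bit entry per status block and keeps the entries whose function id matches this PF: the vector-0 entry becomes igu_dsb_id and the first non-default match becomes igu_base_sb. A sketch of the scan over an in-memory copy of the CAM; the valid/fid/vector decode below is schematic, not the device's exact bit layout:

	#include <stdint.h>
	#include <stdio.h>

	#define IGU_ENTRIES 128

	/* Schematic decode, stand-ins for the driver's accessors. */
	#define ENTRY_VALID(e)	((e) & 0x1)
	#define ENTRY_FID(e)	(((e) >> 1) & 0x7f)
	#define ENTRY_VEC(e)	(((e) >> 8) & 0xff)

	struct igu_info {
		int dsb_id;	/* default status block */
		int base_sb;	/* first fastpath status block */
		int sb_cnt;	/* fastpath status blocks owned by this PF */
	};

	static void igu_cam_scan(const uint32_t *cam, int pfid,
				 struct igu_info *out)
	{
		out->dsb_id = out->base_sb = -1;
		out->sb_cnt = 0;

		for (int sb = 0; sb < IGU_ENTRIES; sb++) {
			uint32_t e = cam[sb];

			if (!ENTRY_VALID(e) || ENTRY_FID(e) != (uint32_t)pfid)
				continue;
			if (ENTRY_VEC(e) == 0) {	/* vector 0: default SB */
				out->dsb_id = sb;
			} else {
				if (out->base_sb < 0)
					out->base_sb = sb;
				out->sb_cnt++;
			}
		}
	}

	int main(void)
	{
		uint32_t cam[IGU_ENTRIES] = {0};
		struct igu_info info;

		cam[0] = 1 | (2 << 1) | (0 << 8);	/* pf 2, default SB */
		cam[5] = 1 | (2 << 1) | (1 << 8);	/* pf 2, fastpath SB */
		igu_cam_scan(cam, 2, &info);
		printf("dsb=%d base=%d cnt=%d\n",
		       info.dsb_id, info.base_sb, info.sb_cnt);
		return 0;
	}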
11185 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
11187 int cfg_size = 0, idx, port = BP_PORT(bp);
11190 bp->port.supported[0] = 0;
11191 bp->port.supported[1] = 0;
11192 switch (bp->link_params.num_phys) {
11194 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
11198 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
11202 if (bp->link_params.multi_phy_config &
11204 bp->port.supported[1] =
11205 bp->link_params.phy[EXT_PHY1].supported;
11206 bp->port.supported[0] =
11207 bp->link_params.phy[EXT_PHY2].supported;
11209 bp->port.supported[0] =
11210 bp->link_params.phy[EXT_PHY1].supported;
11211 bp->port.supported[1] =
11212 bp->link_params.phy[EXT_PHY2].supported;
11218 if (!(bp->port.supported[0] || bp->port.supported[1])) {
11220 SHMEM_RD(bp,
11222 SHMEM_RD(bp,
11227 if (CHIP_IS_E3(bp))
11228 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
11232 bp->port.phy_addr = REG_RD(
11233 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
11236 bp->port.phy_addr = REG_RD(
11237 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
11241 bp->port.link_config[0]);
11245 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
11248 if (!(bp->link_params.speed_cap_mask[idx] &
11250 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
11252 if (!(bp->link_params.speed_cap_mask[idx] &
11254 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
11256 if (!(bp->link_params.speed_cap_mask[idx] &
11258 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
11260 if (!(bp->link_params.speed_cap_mask[idx] &
11262 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
11264 if (!(bp->link_params.speed_cap_mask[idx] &
11266 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
11269 if (!(bp->link_params.speed_cap_mask[idx] &
11271 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
11273 if (!(bp->link_params.speed_cap_mask[idx] &
11275 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
11277 if (!(bp->link_params.speed_cap_mask[idx] &
11279 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
11282 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
11283 bp->port.supported[1]);
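The long if-chain above is a straight mask-down: any speed whose capability bit is absent from speed_cap_mask gets stripped from the supported[] flags, and one capability bit can gate both half- and full-duplex variants (see the 1G case). A condensed, table-driven sketch of the same filtering; the CAP_*/SUP_* bit values are hypothetical, the real constants come from ethtool and the shmem layout:

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical bit values for illustration only. */
	#define CAP_10M_FULL	(1u << 0)
	#define CAP_1G		(1u << 1)
	#define CAP_10G		(1u << 2)

	#define SUP_10M_FULL	(1u << 0)
	#define SUP_1G_HALF	(1u << 1)
	#define SUP_1G_FULL	(1u << 2)
	#define SUP_10G_FULL	(1u << 3)

	static const struct { uint32_t cap; uint32_t sup; } speed_map[] = {
		{ CAP_10M_FULL, SUP_10M_FULL },
		{ CAP_1G,	SUP_1G_HALF | SUP_1G_FULL },	/* one cap, both duplexes */
		{ CAP_10G,	SUP_10G_FULL },
	};

	static uint32_t mask_supported(uint32_t supported, uint32_t cap_mask)
	{
		for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++)
			if (!(cap_mask & speed_map[i].cap))
				supported &= ~speed_map[i].sup;
		return supported;
	}

	int main(void)
	{
		uint32_t sup = SUP_10M_FULL | SUP_1G_HALF | SUP_1G_FULL |
			       SUP_10G_FULL;

		printf("0x%x\n", mask_supported(sup, CAP_1G | CAP_10G));
		return 0;
	}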
11286 static void bnx2x_link_settings_requested(struct bnx2x *bp)
11289 bp->port.advertising[0] = 0;
11290 bp->port.advertising[1] = 0;
11291 switch (bp->link_params.num_phys) {
11301 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
11302 link_config = bp->port.link_config[idx];
11305 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
11306 bp->link_params.req_line_speed[idx] =
11308 bp->port.advertising[idx] |=
11309 bp->port.supported[idx];
11310 if (bp->link_params.phy[EXT_PHY1].type ==
11312 bp->port.advertising[idx] |=
11317 bp->link_params.req_line_speed[idx] =
11319 bp->port.advertising[idx] |=
11327 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
11328 bp->link_params.req_line_speed[idx] =
11330 bp->port.advertising[idx] |=
11336 bp->link_params.speed_cap_mask[idx]);
11342 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
11343 bp->link_params.req_line_speed[idx] =
11345 bp->link_params.req_duplex[idx] =
11347 bp->port.advertising[idx] |=
11353 bp->link_params.speed_cap_mask[idx]);
11359 if (bp->port.supported[idx] &
11361 bp->link_params.req_line_speed[idx] =
11363 bp->port.advertising[idx] |=
11369 bp->link_params.speed_cap_mask[idx]);
11375 if (bp->port.supported[idx] &
11377 bp->link_params.req_line_speed[idx] =
11379 bp->link_params.req_duplex[idx] =
11381 bp->port.advertising[idx] |=
11387 bp->link_params.speed_cap_mask[idx]);
11393 if (bp->port.supported[idx] &
11395 bp->link_params.req_line_speed[idx] =
11397 bp->port.advertising[idx] |=
11400 } else if (bp->port.supported[idx] &
11402 bp->link_params.req_line_speed[idx] =
11404 bp->port.advertising[idx] |=
11409 bp->link_params.speed_cap_mask[idx]);
11415 if (bp->port.supported[idx] &
11417 bp->link_params.req_line_speed[idx] =
11419 bp->port.advertising[idx] |=
11425 bp->link_params.speed_cap_mask[idx]);
11431 if (bp->port.supported[idx] &
11433 bp->link_params.req_line_speed[idx] =
11435 bp->port.advertising[idx] |=
11438 } else if (bp->port.supported[idx] &
11440 bp->link_params.req_line_speed[idx] =
11442 bp->port.advertising[idx] |=
11448 bp->link_params.speed_cap_mask[idx]);
11453 bp->link_params.req_line_speed[idx] = SPEED_20000;
11459 bp->link_params.req_line_speed[idx] =
11461 bp->port.advertising[idx] =
11462 bp->port.supported[idx];
11466 bp->link_params.req_flow_ctrl[idx] = (link_config &
11468 if (bp->link_params.req_flow_ctrl[idx] ==
11470 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
11471 bp->link_params.req_flow_ctrl[idx] =
11474 bnx2x_set_requested_fc(bp);
11478 bp->link_params.req_line_speed[idx],
11479 bp->link_params.req_duplex[idx],
11480 bp->link_params.req_flow_ctrl[idx],
11481 bp->port.advertising[idx]);
11493 static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
11495 int port = BP_PORT(bp);
11499 bp->link_params.bp = bp;
11500 bp->link_params.port = port;
11502 bp->link_params.lane_config =
11503 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
11505 bp->link_params.speed_cap_mask[0] =
11506 SHMEM_RD(bp,
11509 bp->link_params.speed_cap_mask[1] =
11510 SHMEM_RD(bp,
11513 bp->port.link_config[0] =
11514 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
11516 bp->port.link_config[1] =
11517 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
11519 bp->link_params.multi_phy_config =
11520 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
11524 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
11525 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
11529 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
11530 bp->flags |= NO_ISCSI_FLAG;
11532 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
11533 bp->flags |= NO_FCOE_FLAG;
11536 bp->link_params.lane_config,
11537 bp->link_params.speed_cap_mask[0],
11538 bp->port.link_config[0]);
11540 bp->link_params.switch_cfg = (bp->port.link_config[0] &
11542 bnx2x_phy_probe(&bp->link_params);
11543 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
11545 bnx2x_link_settings_requested(bp);
11552 SHMEM_RD(bp,
11556 bp->mdio.prtad = bp->port.phy_addr;
11560 bp->mdio.prtad =
11564 eee_mode = (((SHMEM_RD(bp, dev_info.
11569 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
11573 bp->link_params.eee_mode = 0;
11577 void bnx2x_get_iscsi_info(struct bnx2x *bp)
11580 int port = BP_PORT(bp);
11581 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11584 if (!CNIC_SUPPORT(bp)) {
11585 bp->flags |= no_flags;
11590 bp->cnic_eth_dev.max_iscsi_conn =
11595 bp->cnic_eth_dev.max_iscsi_conn);
11601 if (!bp->cnic_eth_dev.max_iscsi_conn)
11602 bp->flags |= no_flags;
11605 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
11608 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11609 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
11610 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11611 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
11614 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11615 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
11616 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11617 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
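Each FCoE WWN arrives from the function config as two 32-bit words (upper and lower halves). For logging or comparison they combine into a single 64-bit name with the upper word in the high half; a small sketch (the sample value is illustrative):

	#include <stdint.h>
	#include <stdio.h>
	#include <inttypes.h>

	static uint64_t wwn_from_halves(uint32_t hi, uint32_t lo)
	{
		return ((uint64_t)hi << 32) | lo;
	}

	int main(void)
	{
		printf("0x%016" PRIx64 "\n",
		       wwn_from_halves(0x20000025u, 0xb5a0b1c2u));
		return 0;
	}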
11620 static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
11624 if (IS_MF(bp)) {
11628 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
11629 if (IS_MF_SD(bp)) {
11630 u32 cfg = MF_CFG_RD(bp,
11638 u32 cfg = MF_CFG_RD(bp,
11648 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
11651 u32 lic = SHMEM_RD(bp,
11662 static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11664 int port = BP_PORT(bp);
11665 int func = BP_ABS_FUNC(bp);
11666 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11668 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
11670 if (!CNIC_SUPPORT(bp)) {
11671 bp->flags |= NO_FCOE_FLAG;
11676 bp->cnic_eth_dev.max_fcoe_conn =
11681 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
11685 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
11688 if (!IS_MF(bp)) {
11690 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11691 SHMEM_RD(bp,
11694 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11695 SHMEM_RD(bp,
11700 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11701 SHMEM_RD(bp,
11704 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11705 SHMEM_RD(bp,
11708 } else if (!IS_MF_SD(bp)) {
11712 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
11713 bnx2x_get_ext_wwn_info(bp, func);
11715 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11716 bnx2x_get_ext_wwn_info(bp, func);
11719 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
11725 if (!bp->cnic_eth_dev.max_fcoe_conn) {
11726 bp->flags |= NO_FCOE_FLAG;
11727 eth_zero_addr(bp->fip_mac);
11731 static void bnx2x_get_cnic_info(struct bnx2x *bp)
11738 bnx2x_get_iscsi_info(bp);
11739 bnx2x_get_fcoe_info(bp);
11742 static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11745 int func = BP_ABS_FUNC(bp);
11746 int port = BP_PORT(bp);
11747 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11748 u8 *fip_mac = bp->fip_mac;
11750 if (IS_MF(bp)) {
11756 if (!IS_MF_SD(bp)) {
11757 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11759 val2 = MF_CFG_RD(bp, func_ext_config[func].
11761 val = MF_CFG_RD(bp, func_ext_config[func].
11767 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11771 val2 = MF_CFG_RD(bp, func_ext_config[func].
11773 val = MF_CFG_RD(bp, func_ext_config[func].
11779 bp->flags |= NO_FCOE_FLAG;
11782 bp->mf_ext_config = cfg;
11785 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
11787 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11792 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
11794 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11805 if (IS_MF_FCOE_AFEX(bp))
11806 eth_hw_addr_set(bp->dev, fip_mac);
11808 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11810 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11814 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11816 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11823 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11829 bp->flags |= NO_FCOE_FLAG;
11830 eth_zero_addr(bp->fip_mac);
11834 static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11837 int func = BP_ABS_FUNC(bp);
11838 int port = BP_PORT(bp);
11842 eth_hw_addr_set(bp->dev, addr);
11844 if (BP_NOMCP(bp)) {
11846 eth_hw_addr_random(bp->dev);
11847 } else if (IS_MF(bp)) {
11848 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11849 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11853 eth_hw_addr_set(bp->dev, addr);
11856 if (CNIC_SUPPORT(bp))
11857 bnx2x_get_cnic_mac_hwinfo(bp);
11860 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11861 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11863 eth_hw_addr_set(bp->dev, addr);
11865 if (CNIC_SUPPORT(bp))
11866 bnx2x_get_cnic_mac_hwinfo(bp);
11869 if (!BP_NOMCP(bp)) {
11871 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11872 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11873 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11874 bp->flags |= HAS_PHYS_PORT_ID;
11877 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11879 if (!is_valid_ether_addr(bp->dev->dev_addr))
11880 dev_err(&bp->pdev->dev,
11883 bp->dev->dev_addr);
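mac_upper carries the first two bytes of the station address and mac_lower the remaining four; bnx2x_set_mac_buf lays them out most-significant-byte first into the 6-byte buffer. A userspace sketch of that assembly with explicit shifts in place of the kernel's cpu_to_be helpers:

	#include <stdint.h>
	#include <stdio.h>

	static void set_mac_buf(uint8_t mac[6], uint32_t lo, uint16_t hi)
	{
		mac[0] = hi >> 8;
		mac[1] = hi & 0xff;
		mac[2] = lo >> 24;
		mac[3] = (lo >> 16) & 0xff;
		mac[4] = (lo >> 8) & 0xff;
		mac[5] = lo & 0xff;
	}

	int main(void)
	{
		uint8_t mac[6];

		set_mac_buf(mac, 0x33445566u, 0x1122);
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		return 0;
	}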
11886 static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11891 if (IS_VF(bp))
11894 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11896 tmp = BP_ABS_FUNC(bp);
11897 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11901 tmp = BP_PORT(bp);
11902 cfg = SHMEM_RD(bp,
11909 static void validate_set_si_mode(struct bnx2x *bp)
11911 u8 func = BP_ABS_FUNC(bp);
11914 val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11918 bp->mf_mode = MULTI_FUNCTION_SI;
11919 bp->mf_config[BP_VN(bp)] =
11920 MF_CFG_RD(bp, func_mf_config[func].config);
11925 static int bnx2x_get_hwinfo(struct bnx2x *bp)
11927 int /*abs*/func = BP_ABS_FUNC(bp);
11933 if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
11934 dev_err(&bp->pdev->dev,
11939 bnx2x_get_common_hwinfo(bp);
11944 if (CHIP_IS_E1x(bp)) {
11945 bp->common.int_block = INT_BLOCK_HC;
11947 bp->igu_dsb_id = DEF_SB_IGU_ID;
11948 bp->igu_base_sb = 0;
11950 bp->common.int_block = INT_BLOCK_IGU;
11953 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11955 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
11963 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
11964 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
11966 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11971 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11972 dev_err(&bp->pdev->dev,
11974 bnx2x_release_hw_lock(bp,
11982 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
11986 rc = bnx2x_get_igu_cam_info(bp);
11987 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11997 if (CHIP_IS_E1x(bp))
11998 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
12004 bp->base_fw_ndsb = bp->igu_base_sb;
12007 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
12008 bp->igu_sb_cnt, bp->base_fw_ndsb);
12013 bp->mf_ov = 0;
12014 bp->mf_mode = 0;
12015 bp->mf_sub_mode = 0;
12016 vn = BP_VN(bp);
12018 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
12020 bp->common.shmem2_base, SHMEM2_RD(bp, size),
12023 if (SHMEM2_HAS(bp, mf_cfg_addr))
12024 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
12026 bp->common.mf_cfg_base = bp->common.shmem_base +
12037 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12039 val = SHMEM_RD(bp,
12045 validate_set_si_mode(bp);
12048 if ((!CHIP_IS_E1x(bp)) &&
12049 (MF_CFG_RD(bp, func_mf_config[func].
12051 (SHMEM2_HAS(bp,
12053 bp->mf_mode = MULTI_FUNCTION_AFEX;
12054 bp->mf_config[vn] = MF_CFG_RD(bp,
12062 val = MF_CFG_RD(bp,
12067 bp->mf_mode = MULTI_FUNCTION_SD;
12068 bp->mf_config[vn] = MF_CFG_RD(bp,
12074 bp->mf_mode = MULTI_FUNCTION_SD;
12075 bp->mf_sub_mode = SUB_MF_MODE_BD;
12076 bp->mf_config[vn] =
12077 MF_CFG_RD(bp,
12080 if (SHMEM2_HAS(bp, mtu_size)) {
12081 int mtu_idx = BP_FW_MB_IDX(bp);
12085 mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
12094 bp->dev->mtu = mtu_size;
12098 bp->mf_mode = MULTI_FUNCTION_SD;
12099 bp->mf_sub_mode = SUB_MF_MODE_UFP;
12100 bp->mf_config[vn] =
12101 MF_CFG_RD(bp,
12105 bp->mf_config[vn] = 0;
12108 val2 = SHMEM_RD(bp,
12113 validate_set_si_mode(bp);
12114 bp->mf_sub_mode =
12119 bp->mf_config[vn] = 0;
12126 bp->mf_config[vn] = 0;
12132 IS_MF(bp) ? "multi" : "single");
12134 switch (bp->mf_mode) {
12136 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
12139 bp->mf_ov = val;
12140 bp->path_has_ovlan = true;
12143 func, bp->mf_ov, bp->mf_ov);
12144 } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
12145 (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
12146 dev_err(&bp->pdev->dev,
12149 bp->path_has_ovlan = true;
12151 dev_err(&bp->pdev->dev,
12166 dev_err(&bp->pdev->dev,
12179 if (CHIP_MODE_IS_4_PORT(bp) &&
12180 !bp->path_has_ovlan &&
12181 !IS_MF(bp) &&
12182 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12183 u8 other_port = !BP_PORT(bp);
12184 u8 other_func = BP_PATH(bp) + 2*other_port;
12185 val = MF_CFG_RD(bp,
12188 bp->path_has_ovlan = true;
12193 if (CHIP_IS_E1H(bp) && IS_MF(bp))
12194 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
12197 bnx2x_get_port_hwinfo(bp);
12200 bnx2x_get_mac_hwinfo(bp);
12202 bnx2x_get_cnic_info(bp);
12207 static void bnx2x_read_fwinfo(struct bnx2x *bp)
12214 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
12216 vpd_data = pci_vpd_alloc(bp->pdev, &vpd_len);
12231 if (rodi >= 0 && kw_len < sizeof(bp->fw_ver)) {
12232 memcpy(bp->fw_ver, &vpd_data[rodi], kw_len);
12233 bp->fw_ver[kw_len] = ' ';
12240 static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
12244 if (CHIP_REV_IS_FPGA(bp))
12246 else if (CHIP_REV_IS_EMUL(bp))
12251 if (CHIP_MODE_IS_4_PORT(bp))
12256 if (CHIP_IS_E2(bp))
12258 else if (CHIP_IS_E3(bp)) {
12260 if (CHIP_REV(bp) == CHIP_REV_Ax)
12262 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
12266 if (IS_MF(bp)) {
12268 switch (bp->mf_mode) {
12287 INIT_MODE_FLAGS(bp) = flags;
12290 static int bnx2x_init_bp(struct bnx2x *bp)
12295 mutex_init(&bp->port.phy_mutex);
12296 mutex_init(&bp->fw_mb_mutex);
12297 mutex_init(&bp->drv_info_mutex);
12298 sema_init(&bp->stats_lock, 1);
12299 bp->drv_info_mng_owner = false;
12300 INIT_LIST_HEAD(&bp->vlan_reg);
12302 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12303 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
12304 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
12305 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
12306 if (IS_PF(bp)) {
12307 rc = bnx2x_get_hwinfo(bp);
12313 eth_hw_addr_set(bp->dev, zero_addr);
12316 bnx2x_set_modes_bitmap(bp);
12318 rc = bnx2x_alloc_mem_bp(bp);
12322 bnx2x_read_fwinfo(bp);
12324 func = BP_FUNC(bp);
12327 if (IS_PF(bp) && !BP_NOMCP(bp)) {
12329 bp->fw_seq =
12330 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12332 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12334 rc = bnx2x_prev_unload(bp);
12336 bnx2x_free_mem_bp(bp);
12341 if (CHIP_REV_IS_FPGA(bp))
12342 dev_err(&bp->pdev->dev, "FPGA detected\n");
12344 if (BP_NOMCP(bp) && (func == 0))
12345 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
12347 bp->disable_tpa = disable_tpa;
12348 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
12350 bp->disable_tpa |= is_kdump_kernel();
12353 if (bp->disable_tpa) {
12354 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12355 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12358 if (CHIP_IS_E1(bp))
12359 bp->dropless_fc = false;
12361 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
12363 bp->mrrs = mrrs;
12365 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
12366 if (IS_VF(bp))
12367 bp->rx_ring_size = MAX_RX_AVAIL;
12370 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
12371 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
12373 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
12375 timer_setup(&bp->timer, bnx2x_timer, 0);
12376 bp->timer.expires = jiffies + bp->current_interval;
12378 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
12379 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
12380 SHMEM2_HAS(bp, dcbx_en) &&
12381 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
12382 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) &&
12383 SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) {
12384 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
12385 bnx2x_dcbx_init_params(bp);
12387 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
12390 if (CHIP_IS_E1x(bp))
12391 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
12393 bp->cnic_base_cl_id = FP_SB_MAX_E2;
12396 if (IS_VF(bp))
12397 bp->max_cos = 1;
12398 else if (CHIP_IS_E1x(bp))
12399 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
12400 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
12401 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
12402 else if (CHIP_IS_E3B0(bp))
12403 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
12406 CHIP_NUM(bp), CHIP_REV(bp));
12407 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
12413 if (IS_VF(bp))
12414 bp->min_msix_vec_cnt = 1;
12415 else if (CNIC_SUPPORT(bp))
12416 bp->min_msix_vec_cnt = 3;
12418 bp->min_msix_vec_cnt = 2;
12419 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
12421 bp->dump_preset_idx = 1;
12437 struct bnx2x *bp = netdev_priv(dev);
12440 bp->stats_init = true;
12444 bnx2x_set_power_state(bp, PCI_D0);
12452 if (IS_PF(bp)) {
12453 int other_engine = BP_PATH(bp) ? 0 : 1;
12457 other_load_status = bnx2x_get_load_status(bp, other_engine);
12458 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
12459 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
12460 bnx2x_chk_parity_attn(bp, &global, true)) {
12468 bnx2x_set_reset_global(bp);
12477 bnx2x_trylock_leader_lock(bp) &&
12478 !bnx2x_leader_reset(bp)) {
12479 netdev_info(bp->dev,
12485 bnx2x_set_power_state(bp, PCI_D3hot);
12486 bp->recovery_state = BNX2X_RECOVERY_FAILED;
12496 bp->recovery_state = BNX2X_RECOVERY_DONE;
12497 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12507 struct bnx2x *bp = netdev_priv(dev);
12510 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
12538 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
12545 int mc_count = netdev_mc_count(bp->dev);
12549 netdev_for_each_mc_addr(ha, bp->dev) {
12576 * @bp: driver handle
12580 static int bnx2x_set_uc_list(struct bnx2x *bp)
12583 struct net_device *dev = bp->dev;
12585 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
12589 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
12596 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
12614 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
12618 static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
12621 struct net_device *dev = bp->dev;
12625 rparam.mcast_obj = &bp->mcast_obj;
12628 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12636 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12641 rc = bnx2x_config_mcast(bp, &rparam,
12653 static int bnx2x_set_mc_list(struct bnx2x *bp)
12657 struct net_device *dev = bp->dev;
12661 if (CHIP_IS_E1x(bp))
12662 return bnx2x_set_mc_list_e1x(bp);
12664 rparam.mcast_obj = &bp->mcast_obj;
12667 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12672 rc = bnx2x_config_mcast(bp, &rparam,
12681 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12690 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
12693 struct bnx2x *bp = netdev_priv(dev);
12695 if (bp->state != BNX2X_STATE_OPEN) {
12696 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12700 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
12705 void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12709 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
12711 netif_addr_lock_bh(bp->dev);
12713 if (bp->dev->flags & IFF_PROMISC) {
12715 } else if ((bp->dev->flags & IFF_ALLMULTI) ||
12716 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
12717 CHIP_IS_E1(bp))) {
12720 if (IS_PF(bp)) {
12722 if (bnx2x_set_mc_list(bp) < 0)
12726 netif_addr_unlock_bh(bp->dev);
12727 if (bnx2x_set_uc_list(bp) < 0)
12729 netif_addr_lock_bh(bp->dev);
12734 bnx2x_schedule_sp_rtnl(bp,
12739 bp->rx_mode = rx_mode;
12741 if (IS_MF_ISCSI_ONLY(bp))
12742 bp->rx_mode = BNX2X_RX_MODE_NONE;
12745 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
12746 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
12747 netif_addr_unlock_bh(bp->dev);
12751 if (IS_PF(bp)) {
12752 bnx2x_set_storm_rx_mode(bp);
12753 netif_addr_unlock_bh(bp->dev);
12759 netif_addr_unlock_bh(bp->dev);
12760 bnx2x_vfpf_storm_rx_mode(bp);
12768 struct bnx2x *bp = netdev_priv(netdev);
12778 bnx2x_acquire_phy_lock(bp);
12779 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12780 bnx2x_release_phy_lock(bp);
12792 struct bnx2x *bp = netdev_priv(netdev);
12802 bnx2x_acquire_phy_lock(bp);
12803 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12804 bnx2x_release_phy_lock(bp);
12811 struct bnx2x *bp = netdev_priv(dev);
12819 return bnx2x_hwtstamp_ioctl(bp, ifr);
12823 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12829 struct bnx2x *bp = netdev_priv(dev);
12832 if (IS_VF(bp))
12833 bnx2x_sample_bulletin(bp);
12845 struct bnx2x *bp = netdev_priv(netdev);
12847 if (!(bp->flags & HAS_PHYS_PORT_ID))
12850 ppid->id_len = sizeof(bp->phys_port_id);
12851 memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12882 static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
12886 if (IS_PF(bp)) {
12890 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
12893 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
12899 static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
12905 list_for_each_entry(vlan, &bp->vlan_reg, link) {
12909 if (bp->vlan_cnt >= bp->vlan_credit)
12912 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
12920 bp->vlan_cnt++;
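bnx2x_vlan_configure_vid_list walks the registered VID list but stops programming hardware filters once vlan_cnt reaches vlan_credit; whatever is left over forces the caller into accept-any-VLAN mode. A sketch of the credit-limited loop over a plain array, with hypothetical names and the configuration ramrod reduced to a flag:

	#include <stdbool.h>
	#include <stdio.h>

	struct vlan_entry {
		unsigned short vid;
		bool hw_configured;	/* mirrors the driver's per-entry flag */
	};

	/* Returns true when at least one VID could not get a hardware
	 * filter, i.e. the caller must accept any VLAN instead. */
	static bool configure_vid_list(struct vlan_entry *v, int n,
				       int *vlan_cnt, int credit)
	{
		bool need_accept_any = false;

		for (int i = 0; i < n; i++) {
			if (v[i].hw_configured)
				continue;		/* already filtered */
			if (*vlan_cnt >= credit) {	/* out of credits */
				need_accept_any = true;
				continue;
			}
			v[i].hw_configured = true;	/* stands in for the ramrod */
			(*vlan_cnt)++;
		}
		return need_accept_any;
	}

	int main(void)
	{
		struct vlan_entry v[] = { {10, false}, {20, false}, {30, false} };
		int cnt = 0;

		printf("accept-any=%d cnt=%d\n",
		       configure_vid_list(v, 3, &cnt, 2), cnt);
		return 0;
	}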
12926 static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
12930 need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
12932 if (bp->accept_any_vlan != need_accept_any_vlan) {
12933 bp->accept_any_vlan = need_accept_any_vlan;
12935 bp->accept_any_vlan ? "raised" : "cleared");
12937 if (IS_PF(bp))
12938 bnx2x_set_rx_mode_inner(bp);
12940 bnx2x_vfpf_storm_rx_mode(bp);
12945 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
12948 bnx2x_vlan_configure(bp, false);
12955 struct bnx2x *bp = netdev_priv(dev);
12966 list_add_tail(&vlan->link, &bp->vlan_reg);
12969 bnx2x_vlan_configure(bp, true);
12976 struct bnx2x *bp = netdev_priv(dev);
12983 list_for_each_entry(vlan, &bp->vlan_reg, link)
12995 rc = __bnx2x_vlan_configure_vid(bp, vid, false);
12997 bp->vlan_cnt--;
13004 bnx2x_vlan_configure(bp, true);
13042 static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13053 bp->dev = dev;
13054 bp->pdev = pdev;
13058 dev_err(&bp->pdev->dev,
13064 dev_err(&bp->pdev->dev,
13070 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13071 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
13087 dev_err(&bp->pdev->dev,
13096 if (IS_PF(bp)) {
13098 dev_err(&bp->pdev->dev,
13106 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
13111 rc = dma_set_mask_and_coherent(&bp->pdev->dev, DMA_BIT_MASK(64));
13113 dev_err(&bp->pdev->dev, "System does not support DMA, aborting\n");
13123 bp->regview = pci_ioremap_bar(pdev, 0);
13124 if (!bp->regview) {
13125 dev_err(&bp->pdev->dev,
13137 bp->pf_num = PCI_FUNC(pdev->devfn);
13140 pci_read_config_dword(bp->pdev,
13142 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
13145 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
13148 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13158 if (IS_PF(bp)) {
13159 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
13160 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
13161 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
13162 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
13165 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
13166 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
13167 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
13168 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
13176 REG_WR(bp,
13183 bnx2x_set_ethtool_ops(bp, dev);
13209 if (IS_PF(bp))
13216 if (IS_PF(bp)) {
13218 bp->accept_any_vlan = true;
13243 bp->mdio.prtad = MDIO_PRTAD_NONE;
13244 bp->mdio.mmds = 0;
13245 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13246 bp->mdio.dev = dev;
13247 bp->mdio.mdio_read = bnx2x_mdio_read;
13248 bp->mdio.mdio_write = bnx2x_mdio_write;
13263 static int bnx2x_check_firmware(struct bnx2x *bp)
13265 const struct firmware *firmware = bp->firmware;
13307 if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor ||
13308 fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) {
13311 bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng);
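The firmware image embeds its version as four bytes that must match what the driver was built against (major, minor, revision, engineering). A sketch of the check; the version numbers in the example are illustrative:

	#include <stdint.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct fw_want { uint8_t major, minor, rev, eng; };

	static bool fw_version_ok(const uint8_t ver[4], const struct fw_want *w)
	{
		return ver[0] == w->major && ver[1] == w->minor &&
		       ver[2] == w->rev   && ver[3] == w->eng;
	}

	int main(void)
	{
		struct fw_want want = { 7, 13, 21, 0 };	/* illustrative only */
		uint8_t ver[4] = { 7, 13, 15, 0 };

		printf("match=%d\n", fw_version_ok(ver, &want));
		return 0;
	}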
13382 bp->arr = kmalloc(len, GFP_KERNEL); \
13383 if (!bp->arr) \
13385 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13386 (u8 *)bp->arr, len); \
13389 static int bnx2x_init_firmware(struct bnx2x *bp)
13395 if (bp->firmware)
13398 if (CHIP_IS_E1(bp)) {
13401 } else if (CHIP_IS_E1H(bp)) {
13404 } else if (!CHIP_IS_E1x(bp)) {
13414 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
13419 rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev);
13424 bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15;
13426 bp->fw_cap |= FW_CAP_INVALIDATE_VF_FP_HSI;
13427 bp->fw_rev = BCM_5710_FW_REVISION_VERSION;
13430 bp->fw_major = BCM_5710_FW_MAJOR_VERSION;
13431 bp->fw_minor = BCM_5710_FW_MINOR_VERSION;
13432 bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION;
13434 rc = bnx2x_check_firmware(bp);
13440 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13455 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13457 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13459 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13461 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13463 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13465 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13467 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13469 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13477 kfree(bp->init_ops_offsets);
13479 kfree(bp->init_ops);
13481 kfree(bp->init_data);
13483 release_firmware(bp->firmware);
13484 bp->firmware = NULL;
13489 static void bnx2x_release_firmware(struct bnx2x *bp)
13491 kfree(bp->init_ops_offsets);
13492 kfree(bp->init_ops);
13493 kfree(bp->init_data);
13494 release_firmware(bp->firmware);
13495 bp->firmware = NULL;
13515 void bnx2x__init_func_obj(struct bnx2x *bp)
13518 bnx2x_setup_dmae(bp);
13520 bnx2x_init_func_obj(bp, &bp->func_obj,
13521 bnx2x_sp(bp, func_rdata),
13522 bnx2x_sp_mapping(bp, func_rdata),
13523 bnx2x_sp(bp, func_afex_rdata),
13524 bnx2x_sp_mapping(bp, func_afex_rdata),
13529 static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
13531 int cid_count = BNX2X_L2_MAX_CID(bp);
13533 if (IS_SRIOV(bp))
13536 if (CNIC_SUPPORT(bp))
13636 static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
13647 func_params.f_obj = &bp->func_obj;
13658 return bnx2x_func_state_change(bp, &func_params);
13663 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13672 if (!netif_running(bp->dev)) {
13717 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
13732 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13734 if (!netif_running(bp->dev)) {
13742 timecounter_adjtime(&bp->timecounter, delta);
13749 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13752 if (!netif_running(bp->dev)) {
13758 ns = timecounter_read(&bp->timecounter);
13770 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13773 if (!netif_running(bp->dev)) {
13784 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
13793 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13799 void bnx2x_register_phc(struct bnx2x *bp)
13802 bp->ptp_clock_info.owner = THIS_MODULE;
13803 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
13804 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */
13805 bp->ptp_clock_info.n_alarm = 0;
13806 bp->ptp_clock_info.n_ext_ts = 0;
13807 bp->ptp_clock_info.n_per_out = 0;
13808 bp->ptp_clock_info.pps = 0;
13809 bp->ptp_clock_info.adjfine = bnx2x_ptp_adjfine;
13810 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
13811 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
13812 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
13813 bp->ptp_clock_info.enable = bnx2x_ptp_enable;
13815 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13816 if (IS_ERR(bp->ptp_clock)) {
13817 bp->ptp_clock = NULL;
13826 struct bnx2x *bp;
13849 * initialization of bp->max_cos based on the chip versions AND chip
13878 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
13882 bp = netdev_priv(dev);
13884 bp->flags = 0;
13886 bp->flags |= IS_VF_FLAG;
13888 bp->igu_sb_cnt = max_non_def_sbs;
13889 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
13890 bp->msg_enable = debug;
13891 bp->cnic_support = cnic_cnt;
13892 bp->cnic_probe = bnx2x_cnic_probe;
13896 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
13903 IS_PF(bp) ? "physical" : "virtual");
13904 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
13909 rc = bnx2x_init_bp(bp);
13913 /* Map doorbells here as we need the real value of bp->max_cos which
13917 if (IS_VF(bp)) {
13918 bp->doorbells = bnx2x_vf_doorbells(bp);
13919 rc = bnx2x_vf_pci_alloc(bp);
13923 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
13925 dev_err(&bp->pdev->dev,
13930 bp->doorbells = ioremap(pci_resource_start(pdev, 2),
13933 if (!bp->doorbells) {
13934 dev_err(&bp->pdev->dev,
13940 if (IS_VF(bp)) {
13941 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
13947 if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
13955 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
13960 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
13961 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
13964 if (CHIP_IS_E1x(bp))
13965 bp->flags |= NO_FCOE_FLAG;
13967 /* Set bp->num_queues for MSI-X mode*/
13968 bnx2x_set_num_queues(bp);
13973 rc = bnx2x_set_int_mode(bp);
13988 if (!NO_FCOE(bp)) {
13991 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
13997 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13998 dev->base_addr, bp->pdev->irq, dev->dev_addr);
13999 pcie_print_link_status(bp->pdev);
14001 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
14002 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
14007 bnx2x_free_mem_bp(bp);
14010 if (bp->regview)
14011 iounmap(bp->regview);
14013 if (IS_PF(bp) && bp->doorbells)
14014 iounmap(bp->doorbells);
14028 struct bnx2x *bp,
14032 if (!NO_FCOE(bp)) {
14034 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14040 bnx2x_dcbnl_update_applist(bp, true);
14043 if (IS_PF(bp) &&
14044 !BP_NOMCP(bp) &&
14045 (bp->flags & BC_SUPPORTS_RMMOD_CMD))
14046 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
14057 bnx2x_iov_remove_one(bp);
14060 if (IS_PF(bp)) {
14061 bnx2x_set_power_state(bp, PCI_D0);
14062 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
14067 bnx2x_reset_endianity(bp);
14071 bnx2x_disable_msi(bp);
14074 if (IS_PF(bp))
14075 bnx2x_set_power_state(bp, PCI_D3hot);
14078 cancel_delayed_work_sync(&bp->sp_rtnl_task);
14081 if (IS_VF(bp))
14082 bnx2x_vfpf_release(bp);
14086 pci_wake_from_d3(pdev, bp->wol);
14091 if (bp->regview)
14092 iounmap(bp->regview);
14097 if (IS_PF(bp)) {
14098 if (bp->doorbells)
14099 iounmap(bp->doorbells);
14101 bnx2x_release_firmware(bp);
14103 bnx2x_vf_pci_dealloc(bp);
14105 bnx2x_free_mem_bp(bp);
14119 struct bnx2x *bp;
14125 bp = netdev_priv(dev);
14127 __bnx2x_remove(pdev, dev, bp, true);
14130 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
14132 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
14134 bp->rx_mode = BNX2X_RX_MODE_NONE;
14136 if (CNIC_LOADED(bp))
14137 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
14140 bnx2x_tx_disable(bp);
14141 netdev_reset_tc(bp->dev);
14143 del_timer_sync(&bp->timer);
14144 cancel_delayed_work_sync(&bp->sp_task);
14145 cancel_delayed_work_sync(&bp->period_task);
14147 if (!down_timeout(&bp->stats_lock, HZ / 10)) {
14148 bp->stats_state = STATS_STATE_DISABLED;
14149 up(&bp->stats_lock);
14152 bnx2x_save_statistics(bp);
14154 netif_carrier_off(bp->dev);
14171 struct bnx2x *bp = netdev_priv(dev);
14185 bnx2x_eeh_nic_unload(bp);
14187 bnx2x_prev_path_mark_eeh(bp);
14206 struct bnx2x *bp = netdev_priv(dev);
14223 bnx2x_set_power_state(bp, PCI_D0);
14229 if (bnx2x_init_shmem(bp)) {
14234 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14237 v = SHMEM2_RD(bp,
14238 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
14239 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
14242 bnx2x_drain_tx_queues(bp);
14243 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
14244 if (!bp->nic_stopped) {
14245 bnx2x_netif_stop(bp, 1);
14246 bnx2x_del_all_napi(bp);
14248 if (CNIC_LOADED(bp))
14249 bnx2x_del_all_napi_cnic(bp);
14251 bnx2x_free_irq(bp);
14252 bp->nic_stopped = true;
14256 bnx2x_send_unload_done(bp, true);
14258 bp->sp_state = 0;
14259 bp->port.pmf = 0;
14261 bnx2x_prev_unload(bp);
14266 bnx2x_squeeze_objects(bp);
14267 bnx2x_free_skbs(bp);
14268 for_each_rx_queue(bp, i)
14269 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
14270 bnx2x_free_fp_mem(bp);
14271 bnx2x_free_mem(bp);
14273 bp->state = BNX2X_STATE_CLOSED;
14291 struct bnx2x *bp = netdev_priv(dev);
14293 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
14294 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
14300 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
14304 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
14305 netdev_err(bp->dev, "Error during driver initialization, try unloading/reloading the driver\n");
14325 struct bnx2x *bp;
14330 bp = netdev_priv(dev);
14331 if (!bp)
14342 __bnx2x_remove(pdev, dev, bp, false);
14401 void bnx2x_notify_link_changed(struct bnx2x *bp)
14403 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
14411 * @bp: driver handle
14416 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
14421 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
14422 &bp->iscsi_l2_mac_obj, true,
14427 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
14433 if (unlikely(bp->panic))
14437 spin_lock_bh(&bp->spq_lock);
14438 BUG_ON(bp->cnic_spq_pending < count);
14439 bp->cnic_spq_pending -= count;
14441 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
14442 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
14445 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
14453 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
14455 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
14457 bnx2x_set_ctx_validation(bp,
14458 &bp->context[cxt_index].
14460 BNX2X_ISCSI_ETH_CID(bp));
14471 if (!atomic_read(&bp->cq_spq_left))
14474 atomic_dec(&bp->cq_spq_left);
14476 if (!atomic_read(&bp->eq_spq_left))
14479 atomic_dec(&bp->eq_spq_left);
14482 if (bp->cnic_spq_pending >=
14483 bp->cnic_eth_dev.max_kwqe_pending)
14486 bp->cnic_spq_pending++;
14493 spe = bnx2x_sp_get_next(bp);
14494 *spe = *bp->cnic_kwq_cons;
14497 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
14499 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
14500 bp->cnic_kwq_cons = bp->cnic_kwq;
14502 bp->cnic_kwq_cons++;
14504 bnx2x_sp_prod_update(bp);
14505 spin_unlock_bh(&bp->spq_lock);
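bnx2x_cnic_sp_post drains the CNIC kernel work queue into the slow-path queue only while element budgets and the per-CNIC pending cap allow. A sketch of that bounded drain with plain counters standing in for the atomics and ring pointers; note the real code charges either the CQ or the EQ budget depending on the ramrod type, while this sketch charges both for brevity:

	#include <stdio.h>

	struct spq_state {
		int cq_left;		/* completion-queue element budget */
		int eq_left;		/* event-queue element budget */
		int cnic_pending;	/* posted but not yet completed */
		int max_pending;	/* cap on outstanding CNIC entries */
		int kwq_pending;	/* queued by CNIC, not yet posted */
	};

	/* Post as many queued entries as the budgets allow; returns how
	 * many were actually moved to the slow-path queue. */
	static int cnic_sp_drain(struct spq_state *s)
	{
		int posted = 0;

		while (s->kwq_pending > 0) {
			if (s->cq_left <= 0 || s->eq_left <= 0)
				break;			/* out of SPQ room */
			if (s->cnic_pending >= s->max_pending)
				break;			/* per-CNIC cap hit */
			s->cq_left--;
			s->eq_left--;
			s->cnic_pending++;
			s->kwq_pending--;
			posted++;			/* stands in for *spe = *cons */
		}
		return posted;
	}

	int main(void)
	{
		struct spq_state s = { 4, 8, 0, 3, 10 };

		printf("posted=%d left=%d\n", cnic_sp_drain(&s), s.kwq_pending);
		return 0;
	}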
14511 struct bnx2x *bp = netdev_priv(dev);
14515 if (unlikely(bp->panic)) {
14521 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
14522 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
14527 spin_lock_bh(&bp->spq_lock);
14532 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
14535 *bp->cnic_kwq_prod = *spe;
14537 bp->cnic_kwq_pending++;
14543 bp->cnic_kwq_pending);
14545 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
14546 bp->cnic_kwq_prod = bp->cnic_kwq;
14548 bp->cnic_kwq_prod++;
14551 spin_unlock_bh(&bp->spq_lock);
14553 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
14554 bnx2x_cnic_sp_post(bp, 0);
14559 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14564 mutex_lock(&bp->cnic_mutex);
14565 c_ops = rcu_dereference_protected(bp->cnic_ops,
14566 lockdep_is_held(&bp->cnic_mutex));
14568 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14569 mutex_unlock(&bp->cnic_mutex);
14574 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14580 c_ops = rcu_dereference(bp->cnic_ops);
14582 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14591 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
14597 return bnx2x_cnic_ctl_send(bp, &ctl);
14600 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
14609 bnx2x_cnic_ctl_send_bh(bp, &ctl);
14610 bnx2x_cnic_sp_post(bp, 0);
14618 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
14621 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14637 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
14642 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
14644 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
14645 set_bit(sched_state, &bp->sp_state);
14648 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
14655 struct bnx2x *bp = netdev_priv(dev);
14663 bnx2x_ilt_wr(bp, index, addr);
14670 bnx2x_cnic_sp_post(bp, count);
14676 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14680 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
14682 cp->iscsi_l2_cid, BP_FUNC(bp),
14683 bnx2x_sp(bp, mac_rdata),
14684 bnx2x_sp_mapping(bp, mac_rdata),
14686 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
14687 &bp->macs_pool);
14690 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
14699 bnx2x_set_iscsi_eth_rx_mode(bp, true);
14706 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14718 bnx2x_set_iscsi_eth_rx_mode(bp, false);
14725 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14731 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
14739 atomic_add(count, &bp->cq_spq_left);
14746 if (CHIP_IS_E3(bp)) {
14747 int idx = BP_FW_MB_IDX(bp);
14748 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14749 int path = BP_PATH(bp);
14750 int port = BP_PORT(bp);
14760 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14763 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
14764 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
14768 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
14777 REG_WR(bp, scratch_offset + i,
14780 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14787 if (CHIP_IS_E3(bp)) {
14788 int idx = BP_FW_MB_IDX(bp);
14791 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14796 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14798 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14808 if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
14813 bnx2x_set_os_driver_state(bp,
14817 bnx2x_set_os_driver_state(bp,
14821 bnx2x_set_os_driver_state(bp,
14835 struct bnx2x *bp = netdev_priv(dev);
14841 if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
14852 offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
14860 if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
14898 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
14900 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14902 if (bp->flags & USING_MSIX_FLAG) {
14905 cp->irq_arr[0].vector = bp->msix_table[1].vector;
14910 if (!CHIP_IS_E1x(bp))
14911 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
14913 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
14915 cp->irq_arr[0].status_blk_map = bp->cnic_sb_mapping;
14916 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
14917 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
14918 cp->irq_arr[1].status_blk = bp->def_status_blk;
14919 cp->irq_arr[1].status_blk_map = bp->def_status_blk_mapping;
14926 void bnx2x_setup_cnic_info(struct bnx2x *bp)
14928 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14930 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
14931 bnx2x_cid_ilt_lines(bp);
14932 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
14933 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
14934 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
14936 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
14937 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
14940 if (NO_ISCSI_OOO(bp))
14947 struct bnx2x *bp = netdev_priv(dev);
14948 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14958 if (!CNIC_SUPPORT(bp)) {
14963 if (!CNIC_LOADED(bp)) {
14964 rc = bnx2x_load_cnic(bp);
14971 bp->cnic_enabled = true;
14973 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
14974 if (!bp->cnic_kwq)
14977 bp->cnic_kwq_cons = bp->cnic_kwq;
14978 bp->cnic_kwq_prod = bp->cnic_kwq;
14979 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
14981 bp->cnic_spq_pending = 0;
14982 bp->cnic_kwq_pending = 0;
14984 bp->cnic_data = data;
14988 cp->iro_arr = bp->iro_arr;
14990 bnx2x_setup_cnic_irq_info(bp);
14992 rcu_assign_pointer(bp->cnic_ops, ops);
14995 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
15002 struct bnx2x *bp = netdev_priv(dev);
15003 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15005 mutex_lock(&bp->cnic_mutex);
15007 RCU_INIT_POINTER(bp->cnic_ops, NULL);
15008 mutex_unlock(&bp->cnic_mutex);
15010 bp->cnic_enabled = false;
15011 kfree(bp->cnic_kwq);
15012 bp->cnic_kwq = NULL;
15019 struct bnx2x *bp = netdev_priv(dev);
15020 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15026 if (NO_ISCSI(bp) && NO_FCOE(bp))
15030 cp->chip_id = CHIP_ID(bp);
15031 cp->pdev = bp->pdev;
15032 cp->io_base = bp->regview;
15033 cp->io_base2 = bp->doorbells;
15036 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
15037 bnx2x_cid_ilt_lines(bp);
15039 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
15045 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
15047 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
15048 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
15050 if (NO_ISCSI_OOO(bp))
15053 if (NO_ISCSI(bp))
15056 if (NO_FCOE(bp))
15070 struct bnx2x *bp = fp->bp;
15073 if (IS_VF(bp))
15074 return bnx2x_vf_ustorm_prods_offset(bp, fp);
15075 else if (!CHIP_IS_E1x(bp))
15078 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
15088 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
15092 if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
15096 pretend_reg = bnx2x_get_pretend_reg(bp);
15097 REG_WR(bp, pretend_reg, pretend_func_val);
15098 REG_RD(bp, pretend_reg);
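Note the read-back immediately after the write: the pretend register has to take effect before any access issued on behalf of the pretended function, and reading it back forces the posted MMIO write to complete. The same flush idiom in a generic sketch, where 'reg' points into a mapped BAR:

	#include <stdint.h>

	/* Generic posted-write flush: MMIO writes may be buffered by
	 * bridges, so a read from the same device orders the write
	 * before whatever follows. */
	static inline void mmio_wr_flush(volatile uint32_t *reg, uint32_t val)
	{
		*reg = val;	/* posted write */
		(void)*reg;	/* read-back forces completion */
	}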
15104 struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
15105 int port = BP_PORT(bp);
15117 val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15128 timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
15131 timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
15134 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15136 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
15140 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
15148 bp->eth_stats.ptp_skip_tx_ts++;
15151 dev_kfree_skb_any(bp->ptp_tx_skb);
15152 bp->ptp_tx_skb = NULL;
15155 void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
15157 int port = BP_PORT(bp);
15160 timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
15163 timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
15167 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
15170 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
15181 struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
15182 int port = BP_PORT(bp);
15186 REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
15196 static void bnx2x_init_cyclecounter(struct bnx2x *bp)
15198 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
15199 bp->cyclecounter.read = bnx2x_cyclecounter_read;
15200 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
15201 bp->cyclecounter.shift = 0;
15202 bp->cyclecounter.mult = 1;
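With mult = 1 and shift = 0 the timecounter conversion ns = (cycles * mult) >> shift is the identity: the device's timesync counter already ticks in nanoseconds. A sketch of the generic conversion showing why those values make it a pass-through:

	#include <stdint.h>
	#include <stdio.h>
	#include <inttypes.h>

	/* Generic cyclecounter conversion as used by the kernel's
	 * timecounter: ns = (cycles * mult) >> shift. */
	static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mult,
				     uint32_t shift)
	{
		return (cycles * mult) >> shift;
	}

	int main(void)
	{
		uint64_t cyc = 123456789ull;

		/* mult=1, shift=0: the counter is already in ns */
		printf("%" PRIu64 "\n", cycles_to_ns(cyc, 1, 0));
		return 0;
	}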
15205 static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
15215 func_params.f_obj = &bp->func_obj;
15222 return bnx2x_func_state_change(bp, &func_params);
15225 static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
15240 for_each_eth_queue(bp, i) {
15241 struct bnx2x_fastpath *fp = &bp->fp[i];
15244 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
15247 rc = bnx2x_queue_state_change(bp, &q_params);
15270 int bnx2x_configure_ptp_filters(struct bnx2x *bp)
15272 int port = BP_PORT(bp);
15276 if (!bp->hwtstamp_ioctl_called)
15283 switch (bp->tx_type) {
15285 bp->flags |= TX_TIMESTAMPING_EN;
15286 REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
15287 REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
15299 switch (bp->rx_filter) {
15305 bp->rx_filter = HWTSTAMP_FILTER_NONE;
15310 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
15312 REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
15313 REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
15318 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
15320 REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
15321 REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
15326 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
15328 REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
15329 REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
15335 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
15337 REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
15338 REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
15343 rc = bnx2x_enable_ptp_packets(bp);
15348 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
15354 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
15367 bp->hwtstamp_ioctl_called = true;
15368 bp->tx_type = config.tx_type;
15369 bp->rx_filter = config.rx_filter;
15371 rc = bnx2x_configure_ptp_filters(bp);
15375 config.rx_filter = bp->rx_filter;
15382 static int bnx2x_configure_ptp(struct bnx2x *bp)
15384 int rc, port = BP_PORT(bp);
15388 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15390 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15392 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
15394 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
15398 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
15402 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
15408 REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
15411 rc = bnx2x_send_reset_timesync_ramrod(bp);
15418 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
15420 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15427 void bnx2x_init_ptp(struct bnx2x *bp)
15432 rc = bnx2x_configure_ptp(bp);
15439 INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
15445 if (!bp->timecounter_init_done) {
15446 bnx2x_init_cyclecounter(bp);
15447 timecounter_init(&bp->timecounter, &bp->cyclecounter,
15449 bp->timecounter_init_done = true;