Lines Matching defs:hdev

48 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
49 static int hclge_init_vlan_config(struct hclge_dev *hdev);
50 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
53 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
54 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
57 static int hclge_set_default_loopback(struct hclge_dev *hdev);
59 static void hclge_sync_mac_table(struct hclge_dev *hdev);
60 static void hclge_restore_hw_table(struct hclge_dev *hdev);
61 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
62 static void hclge_sync_fd_table(struct hclge_dev *hdev);
63 static void hclge_update_fec_stats(struct hclge_dev *hdev);
64 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
66 static int hclge_update_port_info(struct hclge_dev *hdev);
438 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
442 u64 *data = (u64 *)(&hdev->mac_stats);
450 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
452 dev_err(&hdev->pdev->dev,
474 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
478 u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
479 u64 *data = (u64 *)(&hdev->mac_stats);
498 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
504 data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
521 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
531 if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
537 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
539 dev_err(&hdev->pdev->dev,
547 dev_err(&hdev->pdev->dev,
555 int hclge_mac_update_stats(struct hclge_dev *hdev)
558 if (hdev->ae_dev->dev_specs.mac_stats_num)
559 return hclge_mac_update_stats_complete(hdev);
561 return hclge_mac_update_stats_defective(hdev);
564 static int hclge_comm_get_count(struct hclge_dev *hdev,
572 if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
578 static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
586 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
589 *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
596 static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
607 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
617 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
622 handle = &hdev->vport[0].nic;
624 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
626 dev_err(&hdev->pdev->dev,
632 hclge_update_fec_stats(hdev);
634 status = hclge_mac_update_stats(hdev);
636 dev_err(&hdev->pdev->dev,
643 struct hclge_dev *hdev = vport->back;
646 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
649 status = hclge_mac_update_stats(hdev);
651 dev_err(&hdev->pdev->dev,
655 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
657 dev_err(&hdev->pdev->dev,
661 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
673 struct hclge_dev *hdev = vport->back;
684 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
685 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
686 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
687 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
692 if (hdev->ae_dev->dev_specs.hilink_version !=
703 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
704 hdev->hw.mac.phydev->drv->set_loopback) ||
705 hnae3_dev_phy_imp_supported(hdev)) {
710 count = hclge_comm_get_count(hdev, g_mac_stats_string,
722 struct hclge_dev *hdev = vport->back;
728 p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
764 struct hclge_dev *hdev = vport->back;
767 p = hclge_comm_get_stats(hdev, g_mac_stats_string,
776 struct hclge_dev *hdev = vport->back;
780 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
781 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
784 static int hclge_parse_func_status(struct hclge_dev *hdev,
794 hdev->flag |= HCLGE_FLAG_MAIN;
796 hdev->flag &= ~HCLGE_FLAG_MAIN;
798 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
802 static int hclge_query_function_status(struct hclge_dev *hdev)
815 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
817 dev_err(&hdev->pdev->dev,
828 return hclge_parse_func_status(hdev, req);
831 static int hclge_query_pf_resource(struct hclge_dev *hdev)
838 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
840 dev_err(&hdev->pdev->dev,
846 hdev->num_tqps = le16_to_cpu(req->tqp_num) +
848 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
851 hdev->tx_buf_size =
854 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
856 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
859 hdev->dv_buf_size =
862 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
864 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
866 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
867 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
868 dev_err(&hdev->pdev->dev,
870 hdev->num_nic_msi);
874 if (hnae3_dev_roce_supported(hdev)) {
875 hdev->num_roce_msi =
881 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
883 hdev->num_msi = hdev->num_nic_msi;
955 struct hclge_dev *hdev = vport->back;
956 u32 speed_ability = hdev->hw.mac.speed_ability;
1127 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1130 struct hclge_mac *mac = &hdev->hw.mac;
1139 if (hnae3_dev_fec_supported(hdev))
1142 if (hnae3_dev_pause_supported(hdev))
1149 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1152 struct hclge_mac *mac = &hdev->hw.mac;
1155 if (hnae3_dev_fec_supported(hdev))
1158 if (hnae3_dev_pause_supported(hdev))
1165 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1168 unsigned long *supported = hdev->hw.mac.supported;
1190 if (hnae3_dev_pause_supported(hdev)) {
1199 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1201 u8 media_type = hdev->hw.mac.media_type;
1204 hclge_parse_fiber_link_mode(hdev, speed_ability);
1206 hclge_parse_copper_link_mode(hdev, speed_ability);
1208 hclge_parse_backplane_link_mode(hdev, speed_ability);
1335 * @hdev: pointer to struct hclge_dev
1338 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1359 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1361 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1370 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1374 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1387 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1390 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1412 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1414 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1434 static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
1439 ret = hclge_mac_query_reg_num(hdev, &reg_num);
1443 hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
1447 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1453 ret = hclge_query_mac_stats_num(hdev);
1460 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1461 hclge_set_default_dev_specs(hdev);
1472 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1476 hclge_parse_dev_specs(hdev, desc);
1477 hclge_check_dev_specs(hdev);
1482 static int hclge_get_cap(struct hclge_dev *hdev)
1486 ret = hclge_query_function_status(hdev);
1488 dev_err(&hdev->pdev->dev,
1494 return hclge_query_pf_resource(hdev);
1497 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1505 dev_info(&hdev->pdev->dev,
1509 hdev->num_tqps = hdev->num_req_vfs + 1;
1510 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1511 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1514 static void hclge_init_tc_config(struct hclge_dev *hdev)
1518 if (hdev->tc_max > HNAE3_MAX_TC ||
1519 hdev->tc_max < 1) {
1520 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1521 hdev->tc_max);
1522 hdev->tc_max = 1;
1526 if (!hnae3_dev_dcb_supported(hdev)) {
1527 hdev->tc_max = 1;
1528 hdev->pfc_max = 0;
1530 hdev->pfc_max = hdev->tc_max;
1533 hdev->tm_info.num_tc = 1;
1536 for (i = 0; i < hdev->tm_info.num_tc; i++)
1537 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1539 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1542 static int hclge_configure(struct hclge_dev *hdev)
1544 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1548 ret = hclge_get_cfg(hdev, &cfg);
1552 hdev->base_tqp_pid = 0;
1553 hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1554 hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1555 hdev->rx_buf_len = cfg.rx_buf_len;
1556 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1557 hdev->hw.mac.media_type = cfg.media_type;
1558 hdev->hw.mac.phy_addr = cfg.phy_addr;
1559 hdev->num_tx_desc = cfg.tqp_desc_num;
1560 hdev->num_rx_desc = cfg.tqp_desc_num;
1561 hdev->tm_info.num_pg = 1;
1562 hdev->tc_max = cfg.tc_num;
1563 hdev->tm_info.hw_pfc_map = 0;
1565 hdev->wanted_umv_size = cfg.umv_space;
1567 hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
1568 hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1569 hdev->gro_en = true;
1573 if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
1574 hdev->fd_en = true;
1575 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1578 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1580 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1584 hdev->hw.mac.req_speed = hdev->hw.mac.speed;
1585 hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
1586 hdev->hw.mac.req_duplex = DUPLEX_FULL;
1588 hclge_parse_link_mode(hdev, cfg.speed_ability);
1590 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1592 hclge_init_tc_config(hdev);
1593 hclge_init_kdump_kernel_config(hdev);
1598 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1610 return hclge_cmd_send(&hdev->hw, &desc, 1);
1613 static int hclge_config_gro(struct hclge_dev *hdev)
1619 if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
1625 req->gro_en = hdev->gro_en ? 1 : 0;
1627 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1629 dev_err(&hdev->pdev->dev,
1635 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1637 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1641 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1643 if (!hdev->htqp)
1646 tqp = hdev->htqp;
1648 for (i = 0; i < hdev->num_tqps; i++) {
1649 tqp->dev = &hdev->pdev->dev;
1653 tqp->q.buf_size = hdev->rx_buf_len;
1654 tqp->q.tx_desc_num = hdev->num_tx_desc;
1655 tqp->q.rx_desc_num = hdev->num_rx_desc;
1661 tqp->q.io_base = hdev->hw.hw.io_base +
1665 tqp->q.io_base = hdev->hw.hw.io_base +
1676 tqp->q.mem_base = hdev->hw.hw.mem_base +
1677 HCLGE_TQP_MEM_OFFSET(hdev, i);
1685 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1702 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1704 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1712 struct hclge_dev *hdev = vport->back;
1715 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1717 if (!hdev->htqp[i].alloced) {
1718 hdev->htqp[i].q.handle = &vport->nic;
1719 hdev->htqp[i].q.tqp_index = alloced;
1720 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1721 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1722 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1723 hdev->htqp[i].alloced = true;
1728 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1729 vport->alloc_tqps / hdev->tm_info.num_tc);
1733 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1744 struct hclge_dev *hdev = vport->back;
1750 kinfo->rx_buf_len = hdev->rx_buf_len;
1751 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1753 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1760 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1765 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1780 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1789 static int hclge_map_tqp(struct hclge_dev *hdev)
1791 struct hclge_vport *vport = hdev->vport;
1794 num_vport = hdev->num_req_vfs + 1;
1798 ret = hclge_map_tqp_to_vport(hdev, vport);
1811 struct hclge_dev *hdev = vport->back;
1814 nic->pdev = hdev->pdev;
1816 bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
1818 nic->kinfo.io_base = hdev->hw.hw.io_base;
1821 hdev->num_tx_desc, hdev->num_rx_desc);
1823 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1828 static int hclge_alloc_vport(struct hclge_dev *hdev)
1830 struct pci_dev *pdev = hdev->pdev;
1838 num_vport = hdev->num_req_vfs + 1;
1840 if (hdev->num_tqps < num_vport) {
1841 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1842 hdev->num_tqps, num_vport);
1847 tqp_per_vport = hdev->num_tqps / num_vport;
1848 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1855 hdev->vport = vport;
1856 hdev->num_alloc_vport = num_vport;
1859 hdev->num_alloc_vfs = hdev->num_req_vfs;
1862 vport->back = hdev;
1892 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1914 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1916 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1922 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1925 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1928 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1933 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1939 if (hdev->hw_tc_map & BIT(i))
1945 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1954 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1963 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1972 if (hdev->hw_tc_map & BIT(i) &&
1973 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
2005 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2010 u32 tc_num = hclge_get_tc_num(hdev);
2015 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2017 if (hnae3_dev_dcb_supported(hdev))
2019 hdev->dv_buf_size;
2022 + hdev->dv_buf_size;
2034 if (hnae3_dev_dcb_supported(hdev)) {
2035 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2045 if (hnae3_dev_dcb_supported(hdev)) {
2046 hi_thrd = shared_buf - hdev->dv_buf_size;
2071 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2076 total_size = hdev->pkt_buf_size;
2082 if (hdev->hw_tc_map & BIT(i)) {
2083 if (total_size < hdev->tx_buf_size)
2086 priv->tx_buf_size = hdev->tx_buf_size;
2097 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2100 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2101 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2112 if (!(hdev->hw_tc_map & BIT(i)))
2117 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2127 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2130 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2133 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2136 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2137 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2145 if (hdev->hw_tc_map & mask &&
2146 !(hdev->tm_info.hw_pfc_map & mask)) {
2155 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2160 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2163 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2166 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2167 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2175 if (hdev->hw_tc_map & mask &&
2176 hdev->tm_info.hw_pfc_map & mask) {
2185 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2190 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2193 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2200 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2201 u32 tc_num = hclge_get_tc_num(hdev);
2202 u32 half_mps = hdev->mps >> 1;
2212 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2227 if (!(hdev->hw_tc_map & BIT(i)))
2232 priv->wl.high = rx_priv - hdev->dv_buf_size;
2242 * @hdev: pointer to struct hclge_dev
2246 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2250 if (!hnae3_dev_dcb_supported(hdev)) {
2251 u32 rx_all = hdev->pkt_buf_size;
2254 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2260 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2263 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2267 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2270 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2273 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2279 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2304 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2306 dev_err(&hdev->pdev->dev,
2312 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2348 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2350 dev_err(&hdev->pdev->dev,
2356 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2392 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2394 dev_err(&hdev->pdev->dev,
2399 static int hclge_common_wl_config(struct hclge_dev *hdev,
2416 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2418 dev_err(&hdev->pdev->dev,
2424 int hclge_buffer_alloc(struct hclge_dev *hdev)
2433 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2435 dev_err(&hdev->pdev->dev,
2440 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2442 dev_err(&hdev->pdev->dev,
2447 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2449 dev_err(&hdev->pdev->dev,
2455 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2457 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2462 if (hnae3_dev_dcb_supported(hdev)) {
2463 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2465 dev_err(&hdev->pdev->dev,
2471 ret = hclge_common_thrd_config(hdev, pkt_buf);
2473 dev_err(&hdev->pdev->dev,
2480 ret = hclge_common_wl_config(hdev, pkt_buf);
2482 dev_err(&hdev->pdev->dev,
2494 struct hclge_dev *hdev = vport->back;
2498 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2501 roce->rinfo.base_vector = hdev->num_nic_msi;
2504 roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
2505 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
2515 static int hclge_init_msi(struct hclge_dev *hdev)
2517 struct pci_dev *pdev = hdev->pdev;
2522 hdev->num_msi,
2530 if (vectors < hdev->num_msi)
2531 dev_warn(&hdev->pdev->dev,
2533 hdev->num_msi, vectors);
2535 hdev->num_msi = vectors;
2536 hdev->num_msi_left = vectors;
2538 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2540 if (!hdev->vector_status) {
2545 for (i = 0; i < hdev->num_msi; i++)
2546 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2548 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2550 if (!hdev->vector_irq) {
2592 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2609 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2619 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2621 dev_err(&hdev->pdev->dev,
2629 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
2631 struct hclge_mac *mac = &hdev->hw.mac;
2639 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
2643 hdev->hw.mac.speed = speed;
2644 hdev->hw.mac.duplex = duplex;
2646 hdev->hw.mac.lane_num = lane_num;
2655 struct hclge_dev *hdev = vport->back;
2657 return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
2660 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2674 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2676 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2685 struct hclge_dev *hdev = vport->back;
2687 if (!hdev->hw.mac.support_autoneg) {
2689 dev_err(&hdev->pdev->dev,
2697 return hclge_set_autoneg_en(hdev, enable);
2703 struct hclge_dev *hdev = vport->back;
2704 struct phy_device *phydev = hdev->hw.mac.phydev;
2709 return hdev->hw.mac.autoneg;
2715 struct hclge_dev *hdev = vport->back;
2718 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2720 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2723 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2729 struct hclge_dev *hdev = vport->back;
2731 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2732 return hclge_set_autoneg_en(hdev, !halt);
2737 static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
2754 hdev->fec_stats.per_lanes[i] +=
2760 static void hclge_parse_fec_stats(struct hclge_dev *hdev,
2767 hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
2768 hdev->fec_stats.rs_corr_blocks +=
2770 hdev->fec_stats.rs_uncorr_blocks +=
2772 hdev->fec_stats.rs_error_blocks +=
2774 hdev->fec_stats.base_r_corr_blocks +=
2776 hdev->fec_stats.base_r_uncorr_blocks +=
2779 hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
2782 static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
2795 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
2799 hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);
2804 static void hclge_update_fec_stats(struct hclge_dev *hdev)
2806 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2810 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
2813 ret = hclge_update_fec_stats_hw(hdev);
2815 dev_err(&hdev->pdev->dev,
2818 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
2821 static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
2824 fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
2826 hdev->fec_stats.rs_uncorr_blocks;
2829 static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
2834 if (hdev->fec_stats.base_r_lane_num == 0 ||
2835 hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
2836 dev_err(&hdev->pdev->dev,
2838 hdev->fec_stats.base_r_lane_num);
2842 for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
2844 hdev->fec_stats.base_r_corr_per_lanes[i];
2846 hdev->fec_stats.base_r_uncorr_per_lanes[i];
2850 static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
2853 u32 fec_mode = hdev->hw.mac.fec_mode;
2858 hclge_get_fec_stats_total(hdev, fec_stats);
2861 hclge_get_fec_stats_lanes(hdev, fec_stats);
2864 dev_err(&hdev->pdev->dev,
2875 struct hclge_dev *hdev = vport->back;
2876 u32 fec_mode = hdev->hw.mac.fec_mode;
2883 hclge_update_fec_stats(hdev);
2885 hclge_comm_get_fec_stats(hdev, fec_stats);
2888 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2909 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2911 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2919 struct hclge_dev *hdev = vport->back;
2920 struct hclge_mac *mac = &hdev->hw.mac;
2924 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2928 ret = hclge_set_fec_hw(hdev, fec_mode);
2940 struct hclge_dev *hdev = vport->back;
2941 struct hclge_mac *mac = &hdev->hw.mac;
2949 static int hclge_mac_init(struct hclge_dev *hdev)
2951 struct hclge_mac *mac = &hdev->hw.mac;
2954 hdev->support_sfp_query = true;
2956 if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2957 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2959 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2960 hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
2964 if (hdev->hw.mac.support_autoneg) {
2965 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2973 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2978 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2980 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2984 ret = hclge_set_default_loopback(hdev);
2988 ret = hclge_buffer_alloc(hdev);
2990 dev_err(&hdev->pdev->dev,
2996 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2998 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2999 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
3000 hdev->last_mbx_scheduled = jiffies;
3001 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
3005 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
3007 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3008 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
3009 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
3010 hdev->last_rst_scheduled = jiffies;
3011 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
3015 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
3017 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3018 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
3019 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
3022 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
3024 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3025 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
3026 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
3029 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
3036 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3038 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
3050 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
3052 struct phy_device *phydev = hdev->hw.mac.phydev;
3056 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
3062 return hclge_get_mac_link_status(hdev, link_status);
3065 static void hclge_push_link_status(struct hclge_dev *hdev)
3071 for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
3072 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
3080 dev_err(&hdev->pdev->dev,
3087 static void hclge_update_link_status(struct hclge_dev *hdev)
3089 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
3090 struct hnae3_handle *handle = &hdev->vport[0].nic;
3091 struct hnae3_client *rclient = hdev->roce_client;
3092 struct hnae3_client *client = hdev->nic_client;
3099 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
3102 ret = hclge_get_mac_phy_link(hdev, &state);
3104 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3108 if (state != hdev->hw.mac.link) {
3109 hdev->hw.mac.link = state;
3111 hclge_update_port_info(hdev);
3114 hclge_config_mac_tnl_int(hdev, state);
3118 hclge_push_link_status(hdev);
3121 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3166 static void hclge_update_pause_advertising(struct hclge_dev *hdev)
3168 struct hclge_mac *mac = &hdev->hw.mac;
3171 switch (hdev->fc_mode_last_time) {
3193 static void hclge_update_advertising(struct hclge_dev *hdev)
3195 struct hclge_mac *mac = &hdev->hw.mac;
3200 hclge_update_pause_advertising(hdev);
3203 static void hclge_update_port_capability(struct hclge_dev *hdev,
3206 if (hnae3_dev_fec_supported(hdev))
3224 hclge_update_advertising(hdev);
3228 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3236 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3238 dev_warn(&hdev->pdev->dev,
3242 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3251 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3262 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3264 dev_warn(&hdev->pdev->dev,
3268 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3309 struct hclge_dev *hdev = vport->back;
3318 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3320 dev_err(&hdev->pdev->dev,
3359 struct hclge_dev *hdev = vport->back;
3387 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3389 dev_err(&hdev->pdev->dev,
3394 hdev->hw.mac.req_autoneg = cmd->base.autoneg;
3395 hdev->hw.mac.req_speed = cmd->base.speed;
3396 hdev->hw.mac.req_duplex = cmd->base.duplex;
3397 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3402 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3407 if (!hnae3_dev_phy_imp_supported(hdev))
3410 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3414 hdev->hw.mac.autoneg = cmd.base.autoneg;
3415 hdev->hw.mac.speed = cmd.base.speed;
3416 hdev->hw.mac.duplex = cmd.base.duplex;
3417 linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);
3422 static int hclge_tp_port_init(struct hclge_dev *hdev)
3426 if (!hnae3_dev_phy_imp_supported(hdev))
3429 cmd.base.autoneg = hdev->hw.mac.req_autoneg;
3430 cmd.base.speed = hdev->hw.mac.req_speed;
3431 cmd.base.duplex = hdev->hw.mac.req_duplex;
3432 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3434 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3437 static int hclge_update_port_info(struct hclge_dev *hdev)
3439 struct hclge_mac *mac = &hdev->hw.mac;
3445 return hclge_update_tp_port_info(hdev);
3448 if (!hdev->support_sfp_query)
3451 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3453 ret = hclge_get_sfp_info(hdev, mac);
3456 ret = hclge_get_sfp_speed(hdev, &speed);
3460 hdev->support_sfp_query = false;
3466 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3468 hclge_update_port_capability(hdev, mac);
3470 (void)hclge_tm_port_shaper_cfg(hdev);
3473 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3480 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
3487 struct hclge_dev *hdev = vport->back;
3489 hclge_update_link_status(hdev);
3491 return hdev->hw.mac.link;
3494 struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3496 if (!pci_num_vf(hdev->pdev)) {
3497 dev_err(&hdev->pdev->dev,
3502 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3503 dev_err(&hdev->pdev->dev,
3505 vf, pci_num_vf(hdev->pdev));
3511 return &hdev->vport[vf];
3518 struct hclge_dev *hdev = vport->back;
3520 vport = hclge_get_vf_vport(hdev, vf);
3542 struct hclge_dev *hdev = vport->back;
3546 vport = hclge_get_vf_vport(hdev, vf);
3562 dev_err(&hdev->pdev->dev,
3569 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3574 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3575 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3576 hw_err_src_reg = hclge_read_dev(&hdev->hw,
3588 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3589 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3590 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3592 hdev->rst_stats.imp_rst_cnt++;
3597 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3598 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3599 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3601 hdev->rst_stats.global_rst_cnt++;
3624 dev_info(&hdev->pdev->dev,
3631 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3642 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3645 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3652 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3654 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3658 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3668 struct hclge_dev *hdev = data;
3673 hclge_enable_vector(&hdev->misc_vector, false);
3674 event_cause = hclge_check_event_cause(hdev, &clearval);
3679 hclge_errhand_task_schedule(hdev);
3682 hclge_reset_task_schedule(hdev);
3685 spin_lock_irqsave(&hdev->ptp->lock, flags);
3686 hclge_ptp_clean_tx_hwts(hdev);
3687 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3699 hclge_mbx_task_schedule(hdev);
3702 dev_warn(&hdev->pdev->dev,
3707 hclge_clear_event_cause(hdev, event_cause, clearval);
3713 hclge_enable_vector(&hdev->misc_vector, true);
3718 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3720 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3721 dev_warn(&hdev->pdev->dev,
3726 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3727 hdev->num_msi_left += 1;
3728 hdev->num_msi_used -= 1;
3731 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3733 struct hclge_misc_vector *vector = &hdev->misc_vector;
3735 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3737 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3738 hdev->vector_status[0] = 0;
3740 hdev->num_msi_left -= 1;
3741 hdev->num_msi_used += 1;
3744 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3748 hclge_get_misc_vector(hdev);
3751 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3752 HCLGE_NAME, pci_name(hdev->pdev));
3753 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3754 0, hdev->misc_vector.name, hdev);
3756 hclge_free_vector(hdev, 0);
3757 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3758 hdev->misc_vector.vector_irq);
3764 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3766 free_irq(hdev->misc_vector.vector_irq, hdev);
3767 hclge_free_vector(hdev, 0);
3770 int hclge_notify_client(struct hclge_dev *hdev,
3773 struct hnae3_handle *handle = &hdev->vport[0].nic;
3774 struct hnae3_client *client = hdev->nic_client;
3777 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3785 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3791 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3794 struct hnae3_handle *handle = &hdev->vport[0].roce;
3795 struct hnae3_client *client = hdev->roce_client;
3798 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3806 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3812 static int hclge_reset_wait(struct hclge_dev *hdev)
3820 switch (hdev->reset_type) {
3834 dev_err(&hdev->pdev->dev,
3836 hdev->reset_type);
3840 val = hclge_read_dev(&hdev->hw, reg);
3843 val = hclge_read_dev(&hdev->hw, reg);
3848 dev_warn(&hdev->pdev->dev,
3849 "Wait for reset timeout: %d\n", hdev->reset_type);
3856 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3868 return hclge_cmd_send(&hdev->hw, &desc, 1);
3871 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3875 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3876 struct hclge_vport *vport = &hdev->vport[i];
3880 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3882 dev_err(&hdev->pdev->dev,
3894 hdev->reset_type == HNAE3_FUNC_RESET) {
3906 dev_warn(&hdev->pdev->dev,
3915 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3917 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3918 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
3919 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3922 if (time_is_before_jiffies(hdev->last_mbx_scheduled +
3924 dev_warn(&hdev->pdev->dev,
3926 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
3929 hclge_mbx_handler(hdev);
3931 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3934 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3946 hclge_mailbox_service_task(hdev);
3948 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3956 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3966 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3969 void hclge_report_hw_error(struct hclge_dev *hdev,
3972 struct hnae3_client *client = hdev->nic_client;
3975 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3978 client->ops->process_hw_error(&hdev->vport[0].nic, type);
3981 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3985 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3987 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3989 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3993 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3995 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3999 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
4009 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4011 dev_err(&hdev->pdev->dev,
4017 static void hclge_do_reset(struct hclge_dev *hdev)
4019 struct hnae3_handle *handle = &hdev->vport[0].nic;
4020 struct pci_dev *pdev = hdev->pdev;
4026 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
4027 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
4031 switch (hdev->reset_type) {
4034 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4036 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
4040 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
4042 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
4047 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
4048 hclge_reset_task_schedule(hdev);
4052 "unsupported reset type: %d\n", hdev->reset_type);
4061 struct hclge_dev *hdev = ae_dev->priv;
4081 if (hdev->reset_type != HNAE3_NONE_RESET &&
4082 rst_level < hdev->reset_type)
4088 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
4092 switch (hdev->reset_type) {
4109 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4110 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
4113 hclge_enable_vector(&hdev->misc_vector, true);
4116 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
4120 reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
4126 hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
4129 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
4133 ret = hclge_set_all_vf_rst(hdev, true);
4137 hclge_func_reset_sync_vf(hdev);
4142 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
4147 switch (hdev->reset_type) {
4149 ret = hclge_func_reset_notify_vf(hdev);
4153 ret = hclge_func_reset_cmd(hdev, 0);
4155 dev_err(&hdev->pdev->dev,
4165 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
4166 hdev->rst_stats.pf_rst_cnt++;
4169 ret = hclge_func_reset_notify_vf(hdev);
4174 hclge_handle_imp_error(hdev);
4175 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4176 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
4185 hclge_reset_handshake(hdev, true);
4186 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
4191 static void hclge_show_rst_info(struct hclge_dev *hdev)
4199 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
4201 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
4206 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
4210 if (hdev->reset_pending) {
4211 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
4212 hdev->reset_pending);
4214 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
4216 dev_info(&hdev->pdev->dev,
4218 hclge_clear_reset_cause(hdev);
4220 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
4221 hdev->rst_stats.reset_fail_cnt++;
4222 set_bit(hdev->reset_type, &hdev->reset_pending);
4223 dev_info(&hdev->pdev->dev,
4225 hdev->rst_stats.reset_fail_cnt);
4229 hclge_clear_reset_cause(hdev);
4232 hclge_reset_handshake(hdev, true);
4234 dev_err(&hdev->pdev->dev, "Reset fail!\n");
4236 hclge_show_rst_info(hdev);
4238 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4243 static void hclge_update_reset_level(struct hclge_dev *hdev)
4245 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4252 hclge_get_reset_level(ae_dev, &hdev->reset_request);
4259 &hdev->default_reset_request);
4261 set_bit(reset_level, &hdev->reset_request);
4264 static int hclge_set_rst_done(struct hclge_dev *hdev)
4274 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4280 dev_warn(&hdev->pdev->dev,
4285 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4292 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4296 switch (hdev->reset_type) {
4299 ret = hclge_set_all_vf_rst(hdev, false);
4303 ret = hclge_set_rst_done(hdev);
4310 hclge_reset_handshake(hdev, false);
4315 static int hclge_reset_stack(struct hclge_dev *hdev)
4319 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4323 ret = hclge_reset_ae_dev(hdev->ae_dev);
4327 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4330 static int hclge_reset_prepare(struct hclge_dev *hdev)
4334 hdev->rst_stats.reset_cnt++;
4336 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4341 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4346 return hclge_reset_prepare_wait(hdev);
4349 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4353 hdev->rst_stats.hw_reset_done_cnt++;
4355 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4360 ret = hclge_reset_stack(hdev);
4365 hclge_clear_reset_cause(hdev);
4367 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4372 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4375 ret = hclge_reset_prepare_up(hdev);
4380 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4385 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4389 hdev->last_reset_time = jiffies;
4390 hdev->rst_stats.reset_fail_cnt = 0;
4391 hdev->rst_stats.reset_done_cnt++;
4392 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4394 hclge_update_reset_level(hdev);
4399 static void hclge_reset(struct hclge_dev *hdev)
4401 if (hclge_reset_prepare(hdev))
4404 if (hclge_reset_wait(hdev))
4407 if (hclge_reset_rebuild(hdev))
4413 if (hclge_reset_err_handle(hdev))
4414 hclge_reset_task_schedule(hdev);
4420 struct hclge_dev *hdev = ae_dev->priv;
4437 if (time_before(jiffies, (hdev->last_reset_time +
4439 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4443 if (hdev->default_reset_request) {
4444 hdev->reset_level =
4446 &hdev->default_reset_request);
4447 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4448 hdev->reset_level = HNAE3_FUNC_RESET;
4451 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4452 hdev->reset_level);
4455 set_bit(hdev->reset_level, &hdev->reset_request);
4456 hclge_reset_task_schedule(hdev);
4458 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4459 hdev->reset_level++;
4465 struct hclge_dev *hdev = ae_dev->priv;
4467 set_bit(rst_type, &hdev->default_reset_request);
4472 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4477 if (!hdev->default_reset_request)
4480 dev_info(&hdev->pdev->dev,
4482 hclge_reset_event(hdev->pdev, NULL);
4485 static void hclge_reset_subtask(struct hclge_dev *hdev)
4487 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4498 hdev->last_reset_time = jiffies;
4499 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4500 if (hdev->reset_type != HNAE3_NONE_RESET)
4501 hclge_reset(hdev);
4504 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4505 if (hdev->reset_type != HNAE3_NONE_RESET)
4506 hclge_do_reset(hdev);
4508 hdev->reset_type = HNAE3_NONE_RESET;
4511 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4513 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4522 if (hdev->default_reset_request && ae_dev->ops->reset_event)
4523 ae_dev->ops->reset_event(hdev->pdev, NULL);
4526 hclge_enable_vector(&hdev->misc_vector, true);
4529 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4531 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4535 if (hclge_find_error_source(hdev)) {
4537 hclge_handle_mac_tnl(hdev);
4538 hclge_handle_vf_queue_err_ras(hdev);
4541 hclge_handle_err_reset_request(hdev);
4544 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4546 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4547 struct device *dev = &hdev->pdev->dev;
4550 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4553 (hdev, &hdev->default_reset_request))
4560 hclge_handle_err_reset_request(hdev);
4563 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4565 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4568 if (hnae3_dev_ras_imp_supported(hdev))
4569 hclge_handle_err_recovery(hdev);
4571 hclge_misc_err_recovery(hdev);
4574 static void hclge_reset_service_task(struct hclge_dev *hdev)
4576 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4579 if (time_is_before_jiffies(hdev->last_rst_scheduled +
4581 dev_warn(&hdev->pdev->dev,
4583 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
4586 down(&hdev->reset_sem);
4587 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4589 hclge_reset_subtask(hdev);
4591 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4592 up(&hdev->reset_sem);
4595 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4603 for (i = 1; i < hdev->num_alloc_vport; i++) {
4604 struct hclge_vport *vport = &hdev->vport[i];
4612 dev_warn(&hdev->pdev->dev,
4619 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4623 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4629 hclge_update_link_status(hdev);
4630 hclge_sync_mac_table(hdev);
4631 hclge_sync_promisc_mode(hdev);
4632 hclge_sync_fd_table(hdev);
4634 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4635 delta = jiffies - hdev->last_serv_processed;
4643 hdev->serv_processed_cnt++;
4644 hclge_update_vport_alive(hdev);
4646 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4647 hdev->last_serv_processed = jiffies;
4651 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4652 hclge_update_stats_for_all(hdev);
4654 hclge_update_port_info(hdev);
4655 hclge_sync_vlan_filter(hdev);
4657 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4658 hclge_rfs_filter_expire(hdev);
4660 hdev->last_serv_processed = jiffies;
4663 hclge_task_schedule(hdev, delta);
4666 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4670 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4671 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4672 !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4676 spin_lock_irqsave(&hdev->ptp->lock, flags);
4681 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4682 hclge_ptp_clean_tx_hwts(hdev);
4684 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4689 struct hclge_dev *hdev =
4692 hclge_errhand_service_task(hdev);
4693 hclge_reset_service_task(hdev);
4694 hclge_ptp_service_task(hdev);
4695 hclge_mailbox_service_task(hdev);
4696 hclge_periodic_service_task(hdev);
4702 hclge_errhand_service_task(hdev);
4703 hclge_reset_service_task(hdev);
4704 hclge_mailbox_service_task(hdev);
4718 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4723 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4727 vector_info->io_addr = hdev->hw.hw.io_base +
4731 vector_info->io_addr = hdev->hw.hw.io_base +
4738 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4739 hdev->vector_irq[idx] = vector_info->vector;
4747 struct hclge_dev *hdev = vport->back;
4752 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4753 vector_num = min(hdev->num_msi_left, vector_num);
4756 while (++i < hdev->num_nic_msi) {
4757 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4758 hclge_get_vector_info(hdev, i, vector);
4766 hdev->num_msi_left -= alloc;
4767 hdev->num_msi_used += alloc;
4772 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4776 for (i = 0; i < hdev->num_msi; i++)
4777 if (vector == hdev->vector_irq[i])
4786 struct hclge_dev *hdev = vport->back;
4789 vector_id = hclge_get_vector_index(hdev, vector);
4791 dev_err(&hdev->pdev->dev,
4796 hclge_free_vector(hdev, vector_id);
4821 struct hclge_dev *hdev = vport->back;
4822 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
4825 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
4827 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4836 return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
4844 struct hclge_dev *hdev = vport->back;
4847 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
4848 &hdev->rss_cfg, nfc);
4850 dev_err(&hdev->pdev->dev,
4880 struct hclge_dev *hdev = vport->back;
4882 return hdev->pf_rss_size_max;
4885 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4887 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4888 struct hclge_vport *vport = hdev->vport;
4902 if (!(hdev->hw_tc_map & BIT(i)))
4911 dev_err(&hdev->pdev->dev,
4925 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
4929 int hclge_rss_init_hw(struct hclge_dev *hdev)
4931 u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
4932 u8 *key = hdev->rss_cfg.rss_hash_key;
4933 u8 hfunc = hdev->rss_cfg.rss_algo;
4936 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
4941 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
4945 ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg);
4949 return hclge_init_rss_tc_mode(hdev);
4956 struct hclge_dev *hdev = vport->back;
4993 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4995 dev_err(&hdev->pdev->dev,
5019 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5021 dev_err(&hdev->pdev->dev,
5034 struct hclge_dev *hdev = vport->back;
5037 vector_id = hclge_get_vector_index(hdev, vector);
5039 dev_err(&hdev->pdev->dev,
5051 struct hclge_dev *hdev = vport->back;
5054 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5057 vector_id = hclge_get_vector_index(hdev, vector);
5073 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5076 struct hclge_vport *vport = &hdev->vport[vf_id];
5109 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5111 dev_err(&hdev->pdev->dev,
5129 struct hclge_dev *hdev = vport->back;
5136 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5150 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5152 if (hlist_empty(&hdev->fd_rule_list))
5153 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5156 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5158 if (!test_bit(location, hdev->fd_bmap)) {
5159 set_bit(location, hdev->fd_bmap);
5160 hdev->hclge_fd_rule_num++;
5164 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5166 if (test_bit(location, hdev->fd_bmap)) {
5167 clear_bit(location, hdev->fd_bmap);
5168 hdev->hclge_fd_rule_num--;
5172 static void hclge_fd_free_node(struct hclge_dev *hdev,
5177 hclge_sync_fd_state(hdev);
5180 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5203 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5204 hclge_fd_free_node(hdev, old_rule);
5219 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5220 hclge_fd_free_node(hdev, old_rule);
5262 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5291 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5293 dev_err(&hdev->pdev->dev,
5298 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5302 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5306 spin_lock_bh(&hdev->fd_rule_lock);
5308 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5310 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5313 spin_unlock_bh(&hdev->fd_rule_lock);
5316 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5319 struct hlist_head *hlist = &hdev->fd_rule_list;
5329 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5346 dev_err(&hdev->pdev->dev,
5352 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5361 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5364 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5369 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5378 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5385 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5389 static void hclge_update_fd_list(struct hclge_dev *hdev,
5393 struct hlist_head *hlist = &hdev->fd_rule_list;
5398 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5400 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5401 hclge_sync_fd_user_def_cfg(hdev, true);
5403 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5411 dev_warn(&hdev->pdev->dev,
5417 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5418 hclge_sync_fd_user_def_cfg(hdev, true);
5421 hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5424 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5425 hclge_task_schedule(hdev, 0);
5429 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5439 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5441 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5450 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5464 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5466 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5479 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5490 stage = &hdev->fd_cfg.key_cfg[stage_num];
5500 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5502 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5507 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5509 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5511 spin_lock_bh(&hdev->fd_rule_lock);
5512 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5513 spin_unlock_bh(&hdev->fd_rule_lock);
5515 hclge_fd_set_user_def_cmd(hdev, cfg);
5518 static int hclge_init_fd_config(struct hclge_dev *hdev)
5524 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
5527 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5531 switch (hdev->fd_cfg.fd_mode) {
5533 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5536 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5539 dev_err(&hdev->pdev->dev,
5541 hdev->fd_cfg.fd_mode);
5545 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5558 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5561 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5570 ret = hclge_get_fd_allocation(hdev,
5571 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5572 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5573 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5574 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5578 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5581 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5614 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5616 dev_err(&hdev->pdev->dev,
5623 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5626 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5662 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5664 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5792 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5795 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5823 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5831 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5834 dev_err(&hdev->pdev->dev,
5840 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5843 dev_err(&hdev->pdev->dev,
5849 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5852 struct hclge_vport *vport = hdev->vport;
5872 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
5875 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
5887 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6022 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6028 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6037 dev_err(&hdev->pdev->dev,
6047 if (hdev->fd_cfg.fd_mode !=
6049 dev_err(&hdev->pdev->dev,
6095 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6100 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6120 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6125 dev_err(&hdev->pdev->dev,
6132 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6138 dev_err(&hdev->pdev->dev,
6151 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6159 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6160 dev_err(&hdev->pdev->dev,
6163 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6167 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6194 if (hdev->fd_cfg.fd_mode !=
6196 dev_err(&hdev->pdev->dev,
6205 dev_err(&hdev->pdev->dev,
6212 dev_err(&hdev->pdev->dev,
6218 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6413 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6418 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6422 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6425 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6430 spin_lock_bh(&hdev->fd_rule_lock);
6432 if (hdev->fd_active_type != rule->rule_type &&
6433 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6434 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6435 dev_err(&hdev->pdev->dev,
6437 rule->rule_type, hdev->fd_active_type);
6438 spin_unlock_bh(&hdev->fd_rule_lock);
6442 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6446 ret = hclge_clear_arfs_rules(hdev);
6450 ret = hclge_fd_config_rule(hdev, rule);
6455 hdev->fd_active_type = rule->rule_type;
6456 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6459 spin_unlock_bh(&hdev->fd_rule_lock);
6466 struct hclge_dev *hdev = vport->back;
6468 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6471 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6474 struct hclge_vport *vport = hdev->vport;
6486 if (vf > hdev->num_req_vfs) {
6487 dev_err(&hdev->pdev->dev,
6489 vf - 1U, hdev->num_req_vfs);
6493 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6494 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6497 dev_err(&hdev->pdev->dev,
6514 struct hclge_dev *hdev = vport->back;
6523 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
6524 dev_err(&hdev->pdev->dev,
6529 if (!hdev->fd_en) {
6530 dev_err(&hdev->pdev->dev,
6537 ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6541 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6564 ret = hclge_add_fd_entry_common(hdev, rule);
6575 struct hclge_dev *hdev = vport->back;
6579 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6584 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6587 spin_lock_bh(&hdev->fd_rule_lock);
6588 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6589 !test_bit(fs->location, hdev->fd_bmap)) {
6590 dev_err(&hdev->pdev->dev,
6592 spin_unlock_bh(&hdev->fd_rule_lock);
6596 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6601 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6604 spin_unlock_bh(&hdev->fd_rule_lock);
6608 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6615 spin_lock_bh(&hdev->fd_rule_lock);
6617 for_each_set_bit(location, hdev->fd_bmap,
6618 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6619 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6623 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6628 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6629 hdev->hclge_fd_rule_num = 0;
6630 bitmap_zero(hdev->fd_bmap,
6631 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6634 spin_unlock_bh(&hdev->fd_rule_lock);
6637 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6639 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6642 hclge_clear_fd_rules_in_list(hdev, true);
6643 hclge_fd_disable_user_def(hdev);
6649 struct hclge_dev *hdev = vport->back;
6657 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6661 if (!hdev->fd_en)
6664 spin_lock_bh(&hdev->fd_rule_lock);
6665 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6669 spin_unlock_bh(&hdev->fd_rule_lock);
6670 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6679 struct hclge_dev *hdev = vport->back;
6681 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
6684 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6685 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6860 static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
6866 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6896 struct hclge_dev *hdev = vport->back;
6899 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6904 spin_lock_bh(&hdev->fd_rule_lock);
6906 rule = hclge_get_fd_rule(hdev, fs->location);
6908 spin_unlock_bh(&hdev->fd_rule_lock);
6948 spin_unlock_bh(&hdev->fd_rule_lock);
6957 struct hclge_dev *hdev = vport->back;
6962 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6965 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6967 spin_lock_bh(&hdev->fd_rule_lock);
6969 &hdev->fd_rule_list, rule_node) {
6971 spin_unlock_bh(&hdev->fd_rule_lock);
6982 spin_unlock_bh(&hdev->fd_rule_lock);
7014 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7020 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7058 struct hclge_dev *hdev = vport->back;
7062 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7068 spin_lock_bh(&hdev->fd_rule_lock);
7069 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7070 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7071 spin_unlock_bh(&hdev->fd_rule_lock);
7082 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7084 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7085 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7086 spin_unlock_bh(&hdev->fd_rule_lock);
7092 spin_unlock_bh(&hdev->fd_rule_lock);
7100 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7101 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7105 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7106 hclge_task_schedule(hdev, 0);
7108 spin_unlock_bh(&hdev->fd_rule_lock);
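The hclge_add_fd_entry_by_arfs fragments (7058-7108) take the first free location in fd_bmap for a new aRFS rule and bail out once the stage-1 rule space is exhausted. A simplified stand-in for that "first free slot in a bitmap" step (the kernel uses find_first_zero_bit() for this):

#include <stdint.h>
#include <stdio.h>

#define MAX_RULES 128	/* stand-in for the stage-1 rule count */

/* Return the first clear bit in @bmap, or @nbits if the table is full;
 * a simplified analogue of find_first_zero_bit(). */
static unsigned int first_zero_bit(const uint64_t *bmap, unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++)
		if (!(bmap[i / 64] & (1ULL << (i % 64))))
			return i;
	return nbits;
}

int main(void)
{
	uint64_t fd_bmap[(MAX_RULES + 63) / 64] = { 0 };

	fd_bmap[0] = 0x7;	/* pretend locations 0-2 are taken */

	unsigned int loc = first_zero_bit(fd_bmap, MAX_RULES);
	if (loc >= MAX_RULES) {
		fprintf(stderr, "aRFS table full\n");
		return 1;
	}
	fd_bmap[loc / 64] |= 1ULL << (loc % 64);	/* claim the slot */
	printf("new aRFS rule at location %u\n", loc);
	return 0;
}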
7112 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7115 struct hnae3_handle *handle = &hdev->vport[0].nic;
7119 spin_lock_bh(&hdev->fd_rule_lock);
7120 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7121 spin_unlock_bh(&hdev->fd_rule_lock);
7124 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7130 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7133 spin_unlock_bh(&hdev->fd_rule_lock);
7138 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7145 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7148 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7152 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7158 hclge_fd_dec_rule_cnt(hdev, rule->location);
7166 hclge_sync_fd_state(hdev);
7294 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7311 dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n",
7329 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7334 if (tc < 0 || tc > hdev->tc_max) {
7335 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7340 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7341 dev_err(&hdev->pdev->dev,
7343 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7347 if (test_bit(prio - 1, hdev->fd_bmap)) {
7348 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7359 struct hclge_dev *hdev = vport->back;
7363 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
7364 dev_err(&hdev->pdev->dev,
7369 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7371 dev_err(&hdev->pdev->dev,
7380 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7393 ret = hclge_add_fd_entry_common(hdev, rule);
7400 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7406 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7418 struct hclge_dev *hdev = vport->back;
7422 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7425 spin_lock_bh(&hdev->fd_rule_lock);
7427 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7429 spin_unlock_bh(&hdev->fd_rule_lock);
7433 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7440 hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
7441 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7442 spin_unlock_bh(&hdev->fd_rule_lock);
7446 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7447 spin_unlock_bh(&hdev->fd_rule_lock);
7452 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7458 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7461 spin_lock_bh(&hdev->fd_rule_lock);
7466 ret = hclge_fd_config_rule(hdev, rule);
7472 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7476 hclge_fd_dec_rule_cnt(hdev, rule->location);
7477 hclge_fd_free_node(hdev, rule);
7486 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7488 spin_unlock_bh(&hdev->fd_rule_lock);
7491 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7493 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7496 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7497 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7499 hclge_clear_fd_rules_in_list(hdev, clear_list);
7502 hclge_sync_fd_user_def_cfg(hdev, false);
7504 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7510 struct hclge_dev *hdev = vport->back;
7512 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7513 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7519 struct hclge_dev *hdev = vport->back;
7521 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
7527 struct hclge_dev *hdev = vport->back;
7529 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7535 struct hclge_dev *hdev = vport->back;
7537 return hdev->rst_stats.hw_reset_done_cnt;
7543 struct hclge_dev *hdev = vport->back;
7545 hdev->fd_en = enable;
7548 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7552 hclge_task_schedule(hdev, 0);
7555 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7582 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7584 dev_err(&hdev->pdev->dev,
7590 hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
7594 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7611 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7613 dev_err(&hdev->pdev->dev,
7623 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7625 dev_err(&hdev->pdev->dev,
7630 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7635 struct phy_device *phydev = hdev->hw.mac.phydev;
7642 dev_err(&hdev->pdev->dev,
7654 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
7662 ret = hclge_get_mac_link_status(hdev, &link_status);
7673 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7683 hclge_phy_link_status_wait(hdev, link_ret);
7685 return hclge_mac_link_status_wait(hdev, link_ret,
7689 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7699 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7701 dev_err(&hdev->pdev->dev,
7716 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7718 dev_err(&hdev->pdev->dev,
7723 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
7745 dev_err(&hdev->pdev->dev,
7754 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7756 dev_err(&hdev->pdev->dev,
7763 static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
7779 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7781 dev_err(&hdev->pdev->dev,
7790 dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
7793 dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
7800 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7805 ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
7809 return hclge_cfg_common_loopback_wait(hdev);
7812 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7817 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7821 hclge_cfg_mac_mode(hdev, en);
7823 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7825 dev_err(&hdev->pdev->dev,
7831 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7849 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7861 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7863 struct phy_device *phydev = hdev->hw.mac.phydev;
7867 if (hnae3_dev_phy_imp_supported(hdev))
7868 return hclge_set_common_loopback(hdev, en,
7874 ret = hclge_enable_phy_loopback(hdev, phydev);
7876 ret = hclge_disable_phy_loopback(hdev, phydev);
7878 dev_err(&hdev->pdev->dev,
7883 hclge_cfg_mac_mode(hdev, en);
7885 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7887 dev_err(&hdev->pdev->dev,
7893 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7906 return hclge_cmd_send(&hdev->hw, &desc, 1);
7912 struct hclge_dev *hdev = vport->back;
7917 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7928 struct hclge_dev *hdev = vport->back;
7936 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7939 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7947 ret = hclge_set_app_loopback(hdev, en);
7951 ret = hclge_set_common_loopback(hdev, en, loop_mode);
7954 ret = hclge_set_phy_loopback(hdev, en);
7960 dev_err(&hdev->pdev->dev,
7970 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7976 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7980 ret = hclge_set_app_loopback(hdev, false);
7984 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7988 return hclge_cfg_common_loopback(hdev, false,
7992 static void hclge_flush_link_update(struct hclge_dev *hdev)
7996 unsigned long last = hdev->serv_processed_cnt;
7999 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8001 last == hdev->serv_processed_cnt)
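hclge_flush_link_update (7992-8001) waits for a bounded number of iterations while the link-updating flag is set and the service counter has not advanced since the wait began. A bare sketch of that bounded-flush idiom, with an invented iteration cap and plain flags instead of the driver's state bits:

#include <stdbool.h>
#include <stdio.h>

#define FLUSH_LINK_TIMEOUT 100000	/* invented bound; the real cap differs */

struct dev_state {
	bool link_updating;		/* stands in for HCLGE_STATE_LINK_UPDATING */
	unsigned long serv_processed_cnt;
};

/* Spin until the in-flight link update finishes, the bound is hit, or the
 * service task has demonstrably run at least once since we started waiting. */
static void flush_link_update(struct dev_state *s)
{
	unsigned long last = s->serv_processed_cnt;
	unsigned int i = 0;

	while (s->link_updating && i++ < FLUSH_LINK_TIMEOUT &&
	       last == s->serv_processed_cnt)
		; /* the real code relaxes/sleeps here */
}

int main(void)
{
	struct dev_state s = { .link_updating = false, .serv_processed_cnt = 3 };

	flush_link_update(&s);	/* returns immediately: nothing in flight */
	puts("flushed");
	return 0;
}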
8008 struct hclge_dev *hdev = vport->back;
8011 hclge_task_schedule(hdev, 0);
8014 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8017 hclge_flush_link_update(hdev);
8024 struct hclge_dev *hdev = vport->back;
8027 hclge_cfg_mac_mode(hdev, true);
8028 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8029 hdev->hw.mac.link = 0;
8034 hclge_mac_start_phy(hdev);
8042 struct hclge_dev *hdev = vport->back;
8044 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8045 spin_lock_bh(&hdev->fd_rule_lock);
8046 hclge_clear_arfs_rules(hdev);
8047 spin_unlock_bh(&hdev->fd_rule_lock);
8052 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
8053 hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
8055 if (hdev->reset_type != HNAE3_FUNC_RESET &&
8056 hdev->reset_type != HNAE3_FLR_RESET) {
8057 hclge_mac_stop_phy(hdev);
8058 hclge_update_link_status(hdev);
8065 hclge_config_mac_tnl_int(hdev, false);
8068 hclge_cfg_mac_mode(hdev, false);
8070 hclge_mac_stop_phy(hdev);
8074 hclge_update_link_status(hdev);
8079 struct hclge_dev *hdev = vport->back;
8087 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8092 hclge_restore_hw_table(hdev);
8096 clear_bit(vport->vport_id, hdev->vport_config_block);
8126 struct hclge_dev *hdev = vport->back;
8129 dev_err(&hdev->pdev->dev,
8142 dev_err(&hdev->pdev->dev,
8150 dev_dbg(&hdev->pdev->dev,
8155 dev_err(&hdev->pdev->dev,
8163 dev_dbg(&hdev->pdev->dev,
8168 dev_err(&hdev->pdev->dev,
8174 dev_err(&hdev->pdev->dev,
8244 struct hclge_dev *hdev = vport->back;
8254 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8256 dev_err(&hdev->pdev->dev,
8273 struct hclge_dev *hdev = vport->back;
8291 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8296 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8299 dev_err(&hdev->pdev->dev,
8315 struct hclge_dev *hdev = vport->back;
8329 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8345 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8355 dev_err(&hdev->pdev->dev,
8364 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8376 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8378 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8388 static int hclge_init_umv_space(struct hclge_dev *hdev)
8393 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8397 if (allocated_size < hdev->wanted_umv_size)
8398 dev_warn(&hdev->pdev->dev,
8400 hdev->wanted_umv_size, allocated_size);
8402 hdev->max_umv_size = allocated_size;
8403 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8404 hdev->share_umv_size = hdev->priv_umv_size +
8405 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8407 if (hdev->ae_dev->dev_specs.mc_mac_size)
8408 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8413 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8418 for (i = 0; i < hdev->num_alloc_vport; i++) {
8419 vport = &hdev->vport[i];
8423 mutex_lock(&hdev->vport_lock);
8424 hdev->share_umv_size = hdev->priv_umv_size +
8425 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8426 mutex_unlock(&hdev->vport_lock);
8428 hdev->used_mc_mac_num = 0;
8433 struct hclge_dev *hdev = vport->back;
8437 mutex_lock(&hdev->vport_lock);
8439 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8440 hdev->share_umv_size == 0);
8443 mutex_unlock(&hdev->vport_lock);
8450 struct hclge_dev *hdev = vport->back;
8453 if (vport->used_umv_num > hdev->priv_umv_size)
8454 hdev->share_umv_size++;
8459 if (vport->used_umv_num >= hdev->priv_umv_size &&
8460 hdev->share_umv_size > 0)
8461 hdev->share_umv_size--;
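The unicast MAC (UMV) space fragments (8388-8461) carve the firmware-allocated table into a private quota per vport plus one shared pool: the table is divided by (num_alloc_vport + 1), each vport keeps one private quota, and the leftover quota plus the division remainder forms the shared pool; a vport is "full" only when its private quota is used up and the shared pool is empty. A worked example of that split with invented numbers (simplified types, not the driver's structures):

#include <stdbool.h>
#include <stdio.h>

struct umv_space {
	unsigned int priv_umv_size;	/* per-vport private quota */
	unsigned int share_umv_size;	/* pool shared by all vports */
};

/* Split @allocated UMV entries among @num_vport vports plus one extra
 * share, mirroring the priv/share arithmetic shown in the listing. */
static void init_umv_space(struct umv_space *s, unsigned int allocated,
			   unsigned int num_vport)
{
	s->priv_umv_size = allocated / (num_vport + 1);
	s->share_umv_size = s->priv_umv_size + allocated % (num_vport + 1);
}

static bool is_umv_full(const struct umv_space *s, unsigned int used_by_vport)
{
	return used_by_vport >= s->priv_umv_size && s->share_umv_size == 0;
}

int main(void)
{
	struct umv_space s;

	/* e.g. 3072 entries across 8 vports: 341 private each,
	 * shared pool of 341 + 3 = 344 */
	init_umv_space(&s, 3072, 8);
	printf("priv=%u share=%u\n", s.priv_umv_size, s.share_umv_size);
	printf("vport with 341 used is full? %s\n",
	       is_umv_full(&s, 341) ? "yes" : "no");
	return 0;
}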
8513 struct hclge_dev *hdev = vport->back;
8538 dev_err(&hdev->pdev->dev,
8574 struct hclge_dev *hdev = vport->back;
8585 dev_err(&hdev->pdev->dev,
8608 mutex_lock(&hdev->vport_lock);
8613 mutex_unlock(&hdev->vport_lock);
8616 mutex_unlock(&hdev->vport_lock);
8619 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8620 hdev->priv_umv_size);
8645 struct hclge_dev *hdev = vport->back;
8654 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
8664 mutex_lock(&hdev->vport_lock);
8666 mutex_unlock(&hdev->vport_lock);
8686 struct hclge_dev *hdev = vport->back;
8695 dev_err(&hdev->pdev->dev,
8704 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
8705 hdev->used_mc_mac_num >=
8706 hdev->ae_dev->dev_specs.mc_mac_size)
8723 hdev->used_mc_mac_num++;
8731 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8750 struct hclge_dev *hdev = vport->back;
8758 dev_dbg(&hdev->pdev->dev,
8777 hdev->used_mc_mac_num--;
8986 struct hclge_dev *hdev = vport->back;
8988 if (test_bit(vport->vport_id, hdev->vport_config_block))
8997 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9001 for (i = 0; i < hdev->num_alloc_vport; i++) {
9002 struct hclge_vport *vport = &hdev->vport[i];
9067 struct hclge_dev *hdev = vport->back;
9081 set_bit(vport->vport_id, hdev->vport_config_block);
9103 struct hclge_dev *hdev = vport->back;
9131 dev_warn(&hdev->pdev->dev,
9142 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9147 for (i = 0; i < hdev->num_alloc_vport; i++) {
9148 vport = &hdev->vport[i];
9154 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9165 dev_err(&hdev->pdev->dev,
9177 dev_err(&hdev->pdev->dev,
9182 dev_err(&hdev->pdev->dev,
9187 dev_err(&hdev->pdev->dev,
9201 struct hclge_dev *hdev = vport->back;
9203 vport = hclge_get_vf_vport(hdev, vf);
9209 dev_info(&hdev->pdev->dev,
9222 dev_info(&hdev->pdev->dev,
9229 dev_info(&hdev->pdev->dev,
9235 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9246 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9248 dev_err(&hdev->pdev->dev,
9257 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9260 static int init_mgr_tbl(struct hclge_dev *hdev)
9266 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9268 dev_err(&hdev->pdev->dev,
9281 struct hclge_dev *hdev = vport->back;
9283 ether_addr_copy(p, hdev->hw.mac.mac_addr);
9336 struct hclge_dev *hdev = vport->back;
9345 dev_err(&hdev->pdev->dev,
9351 ret = hclge_pause_addr_cfg(hdev, new_addr);
9353 dev_err(&hdev->pdev->dev,
9360 old_addr = hdev->hw.mac.mac_addr;
9366 dev_err(&hdev->pdev->dev,
9372 hclge_pause_addr_cfg(hdev, old_addr);
9379 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9382 hclge_task_schedule(hdev, 0);
9387 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9391 if (!hnae3_dev_phy_imp_supported(hdev))
9396 data->phy_id = hdev->hw.mac.phy_addr;
9400 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9404 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9414 struct hclge_dev *hdev = vport->back;
9418 return hclge_ptp_get_cfg(hdev, ifr);
9420 return hclge_ptp_set_cfg(hdev, ifr);
9422 if (!hdev->hw.mac.phydev)
9423 return hclge_mii_ioctl(hdev, ifr, cmd);
9426 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9429 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9442 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9444 dev_err(&hdev->pdev->dev,
9451 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9464 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9466 dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
9476 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9478 dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
9486 struct hclge_dev *hdev = vport->back;
9487 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9490 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9491 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9495 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9502 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9508 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9520 struct hclge_dev *hdev = vport->back;
9537 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9549 struct hclge_dev *hdev = vport->back;
9553 mutex_lock(&hdev->vport_lock);
9559 mutex_unlock(&hdev->vport_lock);
9565 mutex_unlock(&hdev->vport_lock);
9571 mutex_unlock(&hdev->vport_lock);
9583 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9614 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9616 dev_err(&hdev->pdev->dev,
9625 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9638 set_bit(vfid, hdev->vf_vlan_full);
9639 dev_warn(&hdev->pdev->dev,
9644 dev_err(&hdev->pdev->dev,
9660 dev_err(&hdev->pdev->dev,
9668 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9671 struct hclge_vport *vport = &hdev->vport[vfid];
9680 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9682 dev_err(&hdev->pdev->dev,
9689 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9693 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9696 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9718 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9720 dev_err(&hdev->pdev->dev,
9725 static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
9730 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9733 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9734 dev_warn(&hdev->pdev->dev,
9741 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9742 dev_warn(&hdev->pdev->dev,
9751 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9764 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9766 dev_err(&hdev->pdev->dev,
9772 if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
9775 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9779 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
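hclge_need_update_port_vlan and hclge_set_vlan_filter_hw (9725-9779) track, per VLAN id, which vports reference it in hdev->vlan_table; the VF-level filter is programmed on every call, but the port-level filter is only touched when a VLAN gains its first user or loses its last one. A stripped-down sketch of that reference-bitmap logic, folding the bookkeeping and the first/last-user decision into one helper and using a single 64-bit bitmap in place of the per-VLAN vport table:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One bit per vport for a single VLAN id; the driver keeps such a bitmap
 * for every VLAN in hdev->vlan_table[vlan_id]. */
static uint64_t vlan_vport_bits;

/* Return true when the port-level filter must change: i.e. this add is the
 * first reference to the VLAN, or this kill removes its last reference. */
static bool need_update_port_vlan(unsigned int vport_id, bool is_kill)
{
	uint64_t bit = 1ULL << vport_id;

	if (!is_kill) {
		bool already = vlan_vport_bits & bit;

		vlan_vport_bits |= bit;
		if (already)
			return false;		/* duplicate add */
		return vlan_vport_bits == bit;	/* first user of this VLAN */
	}

	if (!(vlan_vport_bits & bit))
		return false;			/* VLAN was never in the table */
	vlan_vport_bits &= ~bit;
	return vlan_vport_bits == 0;		/* last user just left */
}

int main(void)
{
	printf("%d\n", need_update_port_vlan(0, false));	/* 1: first add */
	printf("%d\n", need_update_port_vlan(1, false));	/* 0: VLAN already live */
	printf("%d\n", need_update_port_vlan(0, true));		/* 0: vport 1 still uses it */
	printf("%d\n", need_update_port_vlan(1, true));		/* 1: last user removed */
	return 0;
}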
9789 struct hclge_dev *hdev = vport->back;
9821 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9823 dev_err(&hdev->pdev->dev,
9834 struct hclge_dev *hdev = vport->back;
9861 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9863 dev_err(&hdev->pdev->dev,
9925 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9935 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9937 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9939 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9941 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9943 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9945 dev_err(&hdev->pdev->dev,
9954 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9955 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9957 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9959 dev_err(&hdev->pdev->dev,
9966 static int hclge_init_vlan_filter(struct hclge_dev *hdev)
9973 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9974 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9979 for (i = 0; i < hdev->num_alloc_vport; i++) {
9980 vport = &hdev->vport[i];
9981 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9989 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) &&
9990 !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
9993 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9997 static int hclge_init_vlan_type(struct hclge_dev *hdev)
9999 hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
10000 hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
10001 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
10002 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
10003 hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
10004 hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;
10006 return hclge_set_vlan_protocol_type(hdev);
10009 static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
10016 for (i = 0; i < hdev->num_alloc_vport; i++) {
10017 vport = &hdev->vport[i];
10029 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10031 struct hnae3_handle *handle = &hdev->vport[0].nic;
10034 ret = hclge_init_vlan_filter(hdev);
10038 ret = hclge_init_vlan_type(hdev);
10042 ret = hclge_init_vport_vlan_offload(hdev);
10053 struct hclge_dev *hdev = vport->back;
10055 mutex_lock(&hdev->vport_lock);
10059 mutex_unlock(&hdev->vport_lock);
10066 mutex_unlock(&hdev->vport_lock);
10074 mutex_unlock(&hdev->vport_lock);
10080 struct hclge_dev *hdev = vport->back;
10083 mutex_lock(&hdev->vport_lock);
10087 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10091 dev_err(&hdev->pdev->dev,
10095 mutex_unlock(&hdev->vport_lock);
10102 mutex_unlock(&hdev->vport_lock);
10111 struct hclge_dev *hdev = vport->back;
10116 hclge_set_vlan_filter_hw(hdev,
10132 struct hclge_dev *hdev = vport->back;
10134 mutex_lock(&hdev->vport_lock);
10138 hclge_set_vlan_filter_hw(hdev,
10150 clear_bit(vport->vport_id, hdev->vf_vlan_full);
10151 mutex_unlock(&hdev->vport_lock);
10154 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10160 mutex_lock(&hdev->vport_lock);
10162 for (i = 0; i < hdev->num_alloc_vport; i++) {
10163 vport = &hdev->vport[i];
10170 mutex_unlock(&hdev->vport_lock);
10173 void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
10184 for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
10185 vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
10195 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10196 ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10207 struct hclge_dev *hdev = vport->back;
10210 mutex_lock(&hdev->vport_lock);
10214 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10223 mutex_unlock(&hdev->vport_lock);
10257 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10259 struct hclge_vport *vport = &hdev->vport[0];
10263 hclge_restore_vport_port_base_vlan_config(hdev);
10265 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10293 struct hclge_dev *hdev = vport->back;
10295 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10304 struct hclge_dev *hdev = vport->back;
10310 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10313 return hclge_set_vlan_filter_hw(hdev,
10323 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10327 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10352 struct hclge_dev *hdev = vport->back;
10356 ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
10365 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10368 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10372 dev_err(&hdev->pdev->dev,
10446 struct hclge_dev *hdev = vport->back;
10451 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10454 vport = hclge_get_vf_vport(hdev, vfid);
10476 dev_err(&hdev->pdev->dev,
10490 (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10501 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10509 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10510 vport = &hdev->vport[vf];
10513 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10517 dev_err(&hdev->pdev->dev,
10527 struct hclge_dev *hdev = vport->back;
10535 mutex_lock(&hdev->vport_lock);
10536 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10537 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10539 mutex_unlock(&hdev->vport_lock);
10544 mutex_unlock(&hdev->vport_lock);
10553 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10563 mutex_lock(&hdev->vport_lock);
10565 mutex_unlock(&hdev->vport_lock);
10572 mutex_lock(&hdev->vport_lock);
10574 mutex_unlock(&hdev->vport_lock);
10582 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10588 for (i = 0; i < hdev->num_alloc_vport; i++) {
10589 vport = &hdev->vport[i];
10597 dev_err(&hdev->pdev->dev,
10607 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10614 mutex_lock(&hdev->vport_lock);
10616 for (i = 0; i < hdev->num_alloc_vport; i++) {
10617 struct hclge_vport *vport = &hdev->vport[i];
10622 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10626 mutex_unlock(&hdev->vport_lock);
10636 mutex_unlock(&hdev->vport_lock);
10644 mutex_unlock(&hdev->vport_lock);
10646 hclge_sync_vlan_fltr_state(hdev);
10649 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10660 return hclge_cmd_send(&hdev->hw, &desc, 1);
10672 struct hclge_dev *hdev = vport->back;
10678 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10682 mutex_lock(&hdev->vport_lock);
10683 /* VF's mps must fit within hdev->mps */
10684 if (vport->vport_id && max_frm_size > hdev->mps) {
10685 mutex_unlock(&hdev->vport_lock);
10689 mutex_unlock(&hdev->vport_lock);
10694 for (i = 1; i < hdev->num_alloc_vport; i++)
10695 if (max_frm_size < hdev->vport[i].mps) {
10696 dev_err(&hdev->pdev->dev,
10698 i, hdev->vport[i].mps);
10699 mutex_unlock(&hdev->vport_lock);
10703 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10705 ret = hclge_set_mac_mtu(hdev, max_frm_size);
10707 dev_err(&hdev->pdev->dev,
10712 hdev->mps = max_frm_size;
10715 ret = hclge_buffer_alloc(hdev);
10717 dev_err(&hdev->pdev->dev,
10721 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10722 mutex_unlock(&hdev->vport_lock);
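The MTU fragments (10649-10722) validate the request in frame terms: the frame size derived from the MTU must not exceed the device's max_frm_size, a VF's mps must fit within the PF's current mps, and the PF may not shrink below any VF's configured mps. Per the default mps expression at line 11698, the frame size accounts for the Ethernet header, FCS and two VLAN tags. A small sketch of the PF-side check, assuming those overhead constants and invented limits:

#include <stdio.h>

#define ETH_HLEN	14
#define ETH_FCS_LEN	4
#define VLAN_HLEN	4

/* Frame size implied by an MTU, matching the header+FCS+double-VLAN
 * overhead used for the default mps in the listing. */
static unsigned int mtu_to_frame_size(unsigned int mtu)
{
	return mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
}

/* Returns 0 if the PF may switch to @new_mtu, -1 otherwise. @max_frm_size
 * stands in for dev_specs.max_frm_size and @vf_mps[] for the per-VF mps
 * values the PF must keep covering. */
static int check_pf_mtu(unsigned int new_mtu, unsigned int max_frm_size,
			const unsigned int *vf_mps, unsigned int num_vf)
{
	unsigned int frame = mtu_to_frame_size(new_mtu);

	if (frame > max_frm_size)
		return -1;

	for (unsigned int i = 0; i < num_vf; i++)
		if (frame < vf_mps[i]) {
			fprintf(stderr, "vf %u mps %u would exceed new pf mps %u\n",
				i, vf_mps[i], frame);
			return -1;
		}
	return 0;
}

int main(void)
{
	unsigned int vf_mps[] = { 1526, 4000 };

	/* 1500 -> frame 1526: rejected, VF 1 already runs with mps 4000 */
	printf("mtu 1500: %s\n",
	       check_pf_mtu(1500, 9728, vf_mps, 2) ? "rejected" : "ok");
	/* 9000 -> frame 9026: fits under the invented 9728-byte cap */
	printf("mtu 9000: %s\n",
	       check_pf_mtu(9000, 9728, vf_mps, 2) ? "rejected" : "ok");
	return 0;
}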
10726 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10740 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10742 dev_err(&hdev->pdev->dev,
10750 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10762 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10764 dev_err(&hdev->pdev->dev,
10788 struct hclge_dev *hdev = vport->back;
10797 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10799 dev_err(&hdev->pdev->dev,
10806 ret = hclge_get_reset_status(hdev, queue_gid,
10819 dev_err(&hdev->pdev->dev,
10824 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10826 dev_err(&hdev->pdev->dev,
10842 struct hclge_dev *hdev = vport->back;
10857 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10859 dev_err(&hdev->pdev->dev,
10869 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10883 struct hclge_dev *hdev = vport->back;
10890 dev_err(&hdev->pdev->dev,
10902 struct hclge_dev *hdev = vport->back;
10904 return hdev->fw_version;
10907 int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version)
10916 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10925 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10927 struct phy_device *phydev = hdev->hw.mac.phydev;
10935 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10939 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10942 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10944 dev_err(&hdev->pdev->dev,
10950 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10952 struct phy_device *phydev = hdev->hw.mac.phydev;
10962 return hclge_mac_pause_setup_hw(hdev);
10982 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10989 struct hclge_dev *hdev = vport->back;
10990 u8 media_type = hdev->hw.mac.media_type;
10995 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11001 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
11004 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
11007 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
11016 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
11020 hdev->fc_mode_last_time = HCLGE_FC_FULL;
11022 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
11024 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
11026 hdev->fc_mode_last_time = HCLGE_FC_NONE;
11028 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
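hclge_record_user_pauseparam (11016-11028) maps the rx/tx pause booleans from ethtool onto a single flow-control mode, and hclge_get_pauseparam (10989-11007) performs the reverse mapping; when PFC is active, link-level pause is reported as off in both directions. A compact sketch of both directions of that mapping (local enum names, not the driver's):

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL, FC_PFC };

/* set_pauseparam direction: rx/tx enables -> flow-control mode */
static enum fc_mode pause_to_fc_mode(bool rx_en, bool tx_en)
{
	if (rx_en && tx_en)
		return FC_FULL;
	if (rx_en)
		return FC_RX_PAUSE;
	if (tx_en)
		return FC_TX_PAUSE;
	return FC_NONE;
}

/* get_pauseparam direction: flow-control mode -> rx/tx enables.
 * With PFC active, link-level pause reads back as disabled. */
static void fc_mode_to_pause(enum fc_mode mode, bool *rx_en, bool *tx_en)
{
	*rx_en = mode == FC_RX_PAUSE || mode == FC_FULL;
	*tx_en = mode == FC_TX_PAUSE || mode == FC_FULL;
	if (mode == FC_PFC)
		*rx_en = *tx_en = false;
}

int main(void)
{
	bool rx, tx;

	fc_mode_to_pause(pause_to_fc_mode(true, false), &rx, &tx);
	printf("rx=%d tx=%d\n", rx, tx);	/* rx=1 tx=0 */
	return 0;
}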
11035 struct hclge_dev *hdev = vport->back;
11036 struct phy_device *phydev = hdev->hw.mac.phydev;
11039 if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
11042 dev_info(&hdev->pdev->dev,
11048 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11049 dev_info(&hdev->pdev->dev,
11054 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
11056 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
11058 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
11059 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
11071 struct hclge_dev *hdev = vport->back;
11074 *speed = hdev->hw.mac.speed;
11076 *duplex = hdev->hw.mac.duplex;
11078 *auto_neg = hdev->hw.mac.autoneg;
11080 *lane_num = hdev->hw.mac.lane_num;
11087 struct hclge_dev *hdev = vport->back;
11093 hclge_update_port_info(hdev);
11096 *media_type = hdev->hw.mac.media_type;
11099 *module_type = hdev->hw.mac.module_type;
11106 struct hclge_dev *hdev = vport->back;
11107 struct phy_device *phydev = hdev->hw.mac.phydev;
11152 static void hclge_info_show(struct hclge_dev *hdev)
11154 struct hnae3_handle *handle = &hdev->vport->nic;
11155 struct device *dev = &hdev->pdev->dev;
11159 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11160 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11161 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11162 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11163 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11164 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11165 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11166 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11167 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11169 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11175 hdev->tx_spare_buf_size);
11184 struct hclge_dev *hdev = ae_dev->priv;
11185 int rst_cnt = hdev->rst_stats.reset_cnt;
11192 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11193 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11194 rst_cnt != hdev->rst_stats.reset_cnt) {
11200 ret = hclge_config_nic_hw_error(hdev, true);
11209 if (netif_msg_drv(&hdev->vport->nic))
11210 hclge_info_show(hdev);
11215 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11216 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11227 struct hclge_dev *hdev = ae_dev->priv;
11232 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11233 !hdev->nic_client)
11236 client = hdev->roce_client;
11241 rst_cnt = hdev->rst_stats.reset_cnt;
11246 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11247 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11248 rst_cnt != hdev->rst_stats.reset_cnt) {
11254 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11266 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11267 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11270 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11278 struct hclge_dev *hdev = ae_dev->priv;
11279 struct hclge_vport *vport = &hdev->vport[0];
11284 hdev->nic_client = client;
11296 if (hnae3_dev_roce_supported(hdev)) {
11297 hdev->roce_client = client;
11313 hdev->nic_client = NULL;
11317 hdev->roce_client = NULL;
11325 struct hclge_dev *hdev = ae_dev->priv;
11326 struct hclge_vport *vport = &hdev->vport[0];
11328 if (hdev->roce_client) {
11329 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11330 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11333 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11334 hdev->roce_client = NULL;
11339 if (hdev->nic_client && client->ops->uninit_instance) {
11340 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11341 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11345 hdev->nic_client = NULL;
11350 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11352 struct pci_dev *pdev = hdev->pdev;
11353 struct hclge_hw *hw = &hdev->hw;
11371 static int hclge_pci_init(struct hclge_dev *hdev)
11373 struct pci_dev *pdev = hdev->pdev;
11401 hw = &hdev->hw;
11409 ret = hclge_dev_mem_map(hdev);
11413 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11418 pcim_iounmap(pdev, hdev->hw.hw.io_base);
11427 static void hclge_pci_uninit(struct hclge_dev *hdev)
11429 struct pci_dev *pdev = hdev->pdev;
11431 if (hdev->hw.hw.mem_base)
11432 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
11434 pcim_iounmap(pdev, hdev->hw.hw.io_base);
11440 static void hclge_state_init(struct hclge_dev *hdev)
11442 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11443 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11444 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11445 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11446 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11447 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11448 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11451 static void hclge_state_uninit(struct hclge_dev *hdev)
11453 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11454 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11456 if (hdev->reset_timer.function)
11457 del_timer_sync(&hdev->reset_timer);
11458 if (hdev->service_task.work.func)
11459 cancel_delayed_work_sync(&hdev->service_task);
11468 struct hclge_dev *hdev = ae_dev->priv;
11473 down(&hdev->reset_sem);
11474 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11475 hdev->reset_type = rst_type;
11476 ret = hclge_reset_prepare(hdev);
11477 if (!ret && !hdev->reset_pending)
11480 dev_err(&hdev->pdev->dev,
11482 ret, hdev->reset_pending, retry_cnt);
11483 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11484 up(&hdev->reset_sem);
11489 hclge_enable_vector(&hdev->misc_vector, false);
11490 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
11492 if (hdev->reset_type == HNAE3_FLR_RESET)
11493 hdev->rst_stats.flr_rst_cnt++;
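The reset-prepare fragments (11468-11493) retry the preparation step while either the prepare call fails or a reset is still pending, under a retry counter; only after a clean pass is the misc vector masked and command processing disabled. A bare-bones sketch of that retry loop, with an invented retry cap and a stub in place of hclge_reset_prepare() (the real loop also re-takes the reset semaphore and re-sets the handling state each attempt):

#include <stdbool.h>
#include <stdio.h>

#define RESET_PREPARE_RETRIES 5	/* invented cap; the driver's bound differs */

/* Stub standing in for the prepare call: pretend the first attempt leaves
 * a reset pending and the second succeeds cleanly. */
static int prepare_once(int attempt, bool *reset_pending)
{
	*reset_pending = (attempt == 0);
	return 0;
}

static int reset_prepare_with_retry(void)
{
	bool reset_pending;
	int retry_cnt = 0;
	int ret;

	do {
		ret = prepare_once(retry_cnt, &reset_pending);
		if (!ret && !reset_pending)
			return 0;	/* prepared, nothing left pending */
		retry_cnt++;
	} while (retry_cnt < RESET_PREPARE_RETRIES);

	fprintf(stderr, "prepare failed, ret=%d pending=%d retries=%d\n",
		ret, reset_pending, retry_cnt);
	return -1;
}

int main(void)
{
	printf("prepare %s\n", reset_prepare_with_retry() ? "failed" : "done");
	return 0;
}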
11498 struct hclge_dev *hdev = ae_dev->priv;
11501 hclge_enable_vector(&hdev->misc_vector, true);
11503 ret = hclge_reset_rebuild(hdev);
11505 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11507 hdev->reset_type = HNAE3_NONE_RESET;
11508 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11509 up(&hdev->reset_sem);
11512 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11516 for (i = 0; i < hdev->num_alloc_vport; i++) {
11517 struct hclge_vport *vport = &hdev->vport[i];
11521 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11523 dev_warn(&hdev->pdev->dev,
11529 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11536 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11544 dev_err(&hdev->pdev->dev,
11551 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11553 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11554 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11557 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11559 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11560 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11570 static int hclge_get_wol_supported_mode(struct hclge_dev *hdev,
11581 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11583 dev_err(&hdev->pdev->dev,
11593 static int hclge_set_wol_cfg(struct hclge_dev *hdev,
11606 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11608 dev_err(&hdev->pdev->dev,
11614 static int hclge_update_wol(struct hclge_dev *hdev)
11616 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
11618 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
11621 return hclge_set_wol_cfg(hdev, wol_info);
11624 static int hclge_init_wol(struct hclge_dev *hdev)
11626 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
11629 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
11633 ret = hclge_get_wol_supported_mode(hdev,
11640 return hclge_update_wol(hdev);
11684 struct hclge_dev *hdev;
11687 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11688 if (!hdev)
11691 hdev->pdev = pdev;
11692 hdev->ae_dev = ae_dev;
11693 hdev->reset_type = HNAE3_NONE_RESET;
11694 hdev->reset_level = HNAE3_FUNC_RESET;
11695 ae_dev->priv = hdev;
11698 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11700 mutex_init(&hdev->vport_lock);
11701 spin_lock_init(&hdev->fd_rule_lock);
11702 sema_init(&hdev->reset_sem, 1);
11704 ret = hclge_pci_init(hdev);
11709 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
11714 hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops);
11715 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
11716 true, hdev->reset_pending);
11720 ret = hclge_clear_hw_resource(hdev);
11724 ret = hclge_get_cap(hdev);
11728 ret = hclge_query_dev_specs(hdev);
11735 ret = hclge_configure(hdev);
11741 ret = hclge_init_msi(hdev);
11747 ret = hclge_misc_irq_init(hdev);
11751 ret = hclge_alloc_tqps(hdev);
11757 ret = hclge_alloc_vport(hdev);
11761 ret = hclge_map_tqp(hdev);
11765 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
11767 if (hnae3_dev_phy_imp_supported(hdev))
11768 ret = hclge_update_tp_port_info(hdev);
11770 ret = hclge_mac_mdio_config(hdev);
11776 ret = hclge_init_umv_space(hdev);
11780 ret = hclge_mac_init(hdev);
11786 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11792 ret = hclge_config_gro(hdev);
11796 ret = hclge_init_vlan_config(hdev);
11802 ret = hclge_tm_schd_init(hdev);
11808 ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
11809 &hdev->rss_cfg);
11815 ret = hclge_rss_init_hw(hdev);
11821 ret = init_mgr_tbl(hdev);
11827 ret = hclge_init_fd_config(hdev);
11834 ret = hclge_ptp_init(hdev);
11838 ret = hclge_update_port_info(hdev);
11842 INIT_KFIFO(hdev->mac_tnl_log);
11844 hclge_dcb_ops_set(hdev);
11846 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11847 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11849 hclge_clear_all_event_cause(hdev);
11850 hclge_clear_resetting_state(hdev);
11853 if (hnae3_dev_ras_imp_supported(hdev))
11854 hclge_handle_occurred_error(hdev);
11867 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11870 hclge_init_rxd_adv_layout(hdev);
11873 hclge_enable_vector(&hdev->misc_vector, true);
11875 ret = hclge_init_wol(hdev);
11880 ret = hclge_devlink_init(hdev);
11884 hclge_state_init(hdev);
11885 hdev->last_reset_time = jiffies;
11887 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11890 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11894 hclge_ptp_uninit(hdev);
11896 if (hdev->hw.mac.phydev)
11897 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11899 hclge_misc_irq_uninit(hdev);
11903 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
11905 pcim_iounmap(pdev, hdev->hw.hw.io_base);
11909 mutex_destroy(&hdev->vport_lock);
11913 static void hclge_stats_clear(struct hclge_dev *hdev)
11915 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11916 memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
11919 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11921 return hclge_config_switch_param(hdev, vf, enable,
11925 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11927 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11932 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11936 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11938 dev_err(&hdev->pdev->dev,
11944 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11946 dev_err(&hdev->pdev->dev,
11957 struct hclge_dev *hdev = vport->back;
11961 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11964 vport = hclge_get_vf_vport(hdev, vf);
11971 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11972 dev_warn(&hdev->pdev->dev,
11976 dev_warn(&hdev->pdev->dev,
11980 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11988 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11990 struct hclge_vport *vport = hdev->vport;
11994 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11998 for (i = 0; i < hdev->num_alloc_vport; i++) {
11999 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
12013 struct hclge_dev *hdev = vport->back;
12016 vport = hclge_get_vf_vport(hdev, vf);
12025 hclge_task_schedule(hdev, 0);
12030 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
12036 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
12037 struct hclge_vport *vport = &hdev->vport[vf];
12042 dev_err(&hdev->pdev->dev,
12048 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
12052 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
12053 dev_err(&hdev->pdev->dev,
12055 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
12066 struct hclge_dev *hdev = vport->back;
12069 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
12073 vport = hclge_get_vf_vport(hdev, vf);
12089 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
12091 struct hnae3_handle *handle = &hdev->vport->nic;
12097 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
12098 vport = hclge_get_vf_vport(hdev, vf);
12111 dev_err(&hdev->pdev->dev,
12121 static void hclge_reset_vport_state(struct hclge_dev *hdev)
12123 struct hclge_vport *vport = hdev->vport;
12126 for (i = 0; i < hdev->num_alloc_vport; i++) {
12134 struct hclge_dev *hdev = ae_dev->priv;
12138 set_bit(HCLGE_STATE_DOWN, &hdev->state);
12140 hclge_stats_clear(hdev);
12144 if (hdev->reset_type == HNAE3_IMP_RESET ||
12145 hdev->reset_type == HNAE3_GLOBAL_RESET) {
12146 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
12147 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
12148 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
12149 hclge_reset_umv_space(hdev);
12152 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
12153 true, hdev->reset_pending);
12159 ret = hclge_map_tqp(hdev);
12165 ret = hclge_mac_init(hdev);
12171 ret = hclge_tp_port_init(hdev);
12178 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
12184 ret = hclge_config_gro(hdev);
12188 ret = hclge_init_vlan_config(hdev);
12194 hclge_reset_tc_config(hdev);
12196 ret = hclge_tm_init_hw(hdev, true);
12202 ret = hclge_rss_init_hw(hdev);
12208 ret = init_mgr_tbl(hdev);
12215 ret = hclge_init_fd_config(hdev);
12221 ret = hclge_ptp_init(hdev);
12226 if (hnae3_dev_ras_imp_supported(hdev))
12227 hclge_handle_occurred_error(hdev);
12234 ret = hclge_config_nic_hw_error(hdev, true);
12242 if (hdev->roce_client) {
12243 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12252 hclge_reset_vport_state(hdev);
12253 ret = hclge_reset_vport_spoofchk(hdev);
12257 ret = hclge_resume_vf_rate(hdev);
12261 hclge_init_rxd_adv_layout(hdev);
12263 ret = hclge_update_wol(hdev);
12276 struct hclge_dev *hdev = ae_dev->priv;
12277 struct hclge_mac *mac = &hdev->hw.mac;
12279 hclge_reset_vf_rate(hdev);
12280 hclge_clear_vf_vlan(hdev);
12281 hclge_state_uninit(hdev);
12282 hclge_ptp_uninit(hdev);
12283 hclge_uninit_rxd_adv_layout(hdev);
12284 hclge_uninit_mac_table(hdev);
12285 hclge_del_all_fd_entries(hdev);
12291 hclge_enable_vector(&hdev->misc_vector, false);
12292 synchronize_irq(hdev->misc_vector.vector_irq);
12295 hclge_config_mac_tnl_int(hdev, false);
12296 hclge_config_nic_hw_error(hdev, false);
12297 hclge_config_rocee_ras_interrupt(hdev, false);
12299 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
12300 hclge_misc_irq_uninit(hdev);
12301 hclge_devlink_uninit(hdev);
12302 hclge_pci_uninit(hdev);
12303 hclge_uninit_vport_vlan_table(hdev);
12304 mutex_destroy(&hdev->vport_lock);
12311 struct hclge_dev *hdev = vport->back;
12313 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12329 struct hclge_dev *hdev = vport->back;
12332 *max_rss_size = hdev->pf_rss_size_max;
12339 struct hclge_dev *hdev = vport->back;
12351 if (!(hdev->hw_tc_map & BIT(i)))
12359 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
12369 struct hclge_dev *hdev = vport->back;
12378 ret = hclge_tm_vport_map_update(hdev);
12380 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12403 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12410 dev_info(&hdev->pdev->dev,
12418 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12430 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12432 dev_err(&hdev->pdev->dev,
12448 struct hclge_dev *hdev = vport->back;
12452 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12454 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12466 struct hclge_dev *hdev = vport->back;
12470 supported[idx] = hdev->hw.mac.supported[idx];
12471 advertising[idx] = hdev->hw.mac.advertising[idx];
12478 struct hclge_dev *hdev = vport->back;
12479 bool gro_en_old = hdev->gro_en;
12482 hdev->gro_en = enable;
12483 ret = hclge_config_gro(hdev);
12485 hdev->gro_en = gro_en_old;
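The GRO fragments (12478-12485) follow a small save/apply/rollback pattern: the old enable flag is remembered, the new value written, the hardware reconfigured, and the cached flag restored if the command fails so software state never diverges from hardware. A generic sketch of that pattern, with a stub in place of hclge_config_gro():

#include <stdbool.h>
#include <stdio.h>

static bool gro_en;	/* cached software copy of the hardware setting */

/* Stub for the firmware configuration call; make it return nonzero to
 * simulate a command failure and exercise the rollback path. */
static int config_gro_hw(bool enable)
{
	(void)enable;
	return 0;
}

static int set_gro_en(bool enable)
{
	bool old = gro_en;
	int ret;

	gro_en = enable;
	ret = config_gro_hw(enable);
	if (ret)
		gro_en = old;	/* keep the cache in sync with hardware */
	return ret;
}

int main(void)
{
	printf("set gro: %d, cached=%d\n", set_gro_en(true), gro_en);
	return 0;
}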
12493 struct hclge_dev *hdev = vport->back;
12532 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12543 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12549 for (i = 0; i < hdev->num_alloc_vport; i++) {
12550 vport = &hdev->vport[i];
12558 static bool hclge_module_existed(struct hclge_dev *hdev)
12565 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12567 dev_err(&hdev->pdev->dev,
12580 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12606 ret = hclge_cmd_send(&hdev->hw, desc, i);
12608 dev_err(&hdev->pdev->dev,
12635 struct hclge_dev *hdev = vport->back;
12639 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12642 if (!hclge_module_existed(hdev))
12646 data_len = hclge_get_sfp_eeprom_info(hdev,
12663 struct hclge_dev *hdev = vport->back;
12667 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12671 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12673 dev_err(&hdev->pdev->dev,
12687 struct hclge_dev *hdev = vport->back;
12699 dev_err(&hdev->pdev->dev,
12710 dev_err(&hdev->pdev->dev,
12714 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
12716 dev_err(&hdev->pdev->dev,
12725 struct hclge_dev *hdev = ae_dev->priv;
12730 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];