Lines matching refs: pf (references to the struct ice_pf private-data pointer in the Intel ice driver's ice_main.c)

63 struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
65 return &pf->pdev->dev;
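
The two matches above (63, 65) are the body of the driver's hw-to-device accessor. A hedged reconstruction of the enclosing helper, assuming the upstream name ice_hw_to_dev():

struct device *ice_hw_to_dev(struct ice_hw *hw)
{
        /* the ice_hw is embedded in ice_pf, so container_of() recovers
         * the owning PF, whose PCI device backs all logging and DMA */
        struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

        return &pf->pdev->dev;
}
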
73 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
75 static void ice_vsi_release_all(struct ice_pf *pf);
77 static int ice_rebuild_channels(struct ice_pf *pf);
110 * @pf: pointer to PF struct
112 static void ice_check_for_hang_subtask(struct ice_pf *pf)
120 ice_for_each_vsi(pf, v)
121 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
122 vsi = pf->vsi[v];
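
Nearly every walk in this listing uses the same iterator and NULL-checks each slot (120-122 above are typical). Assumed shape of the macro, as defined in ice.h upstream:

#define ice_for_each_vsi(pf, i) \
        for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)

Slots in pf->vsi[] can be NULL (released or never allocated), which is why the pf->vsi[v] guard precedes every dereference below.
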
174 * @pf: board private structure
180 static int ice_init_mac_fltr(struct ice_pf *pf)
185 vsi = ice_get_main_vsi(pf);
325 struct ice_pf *pf = vsi->back;
326 struct ice_hw *hw = &pf->hw;
480 * @pf: board private structure
482 static void ice_sync_fltr_subtask(struct ice_pf *pf)
486 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
489 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
491 ice_for_each_vsi(pf, v)
492 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
493 ice_vsi_sync_fltr(pf->vsi[v])) {
495 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
502 * @pf: the PF
505 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
510 ice_for_each_vsi(pf, v)
511 if (pf->vsi[v])
512 ice_dis_vsi(pf->vsi[v], locked);
515 pf->pf_agg_node[node].num_vsis = 0;
518 pf->vf_agg_node[node].num_vsis = 0;
523 * @pf: board private structure
530 static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
535 recp = pf->hw.switch_info->recp_list;
542 * @pf: board private structure
548 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
550 struct ice_hw *hw = &pf->hw;
555 dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
558 if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
561 ice_unplug_aux_dev(pf);
565 ice_vc_notify_reset(pf);
568 mutex_lock(&pf->vfs.table_lock);
569 ice_for_each_vf(pf, bkt, vf)
571 mutex_unlock(&pf->vfs.table_lock);
573 if (ice_is_eswitch_mode_switchdev(pf)) {
575 ice_clear_sw_switch_recipes(pf);
579 vsi = ice_get_main_vsi(pf);
588 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
604 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
613 ice_pf_dis_all_vsi(pf, false);
615 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
616 ice_ptp_prepare_for_reset(pf, reset_type);
618 if (ice_is_feature_supported(pf, ICE_F_GNSS))
619 ice_gnss_exit(pf);
626 set_bit(ICE_PREPARED_FOR_RESET, pf->state);
631 * @pf: board private structure
634 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
636 struct device *dev = ice_pf_to_dev(pf);
637 struct ice_hw *hw = &pf->hw;
641 if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
646 ice_prepare_for_reset(pf, reset_type);
651 set_bit(ICE_RESET_FAILED, pf->state);
652 clear_bit(ICE_RESET_OICR_RECV, pf->state);
653 clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
654 clear_bit(ICE_PFR_REQ, pf->state);
655 clear_bit(ICE_CORER_REQ, pf->state);
656 clear_bit(ICE_GLOBR_REQ, pf->state);
657 wake_up(&pf->reset_wait_queue);
666 pf->pfr_count++;
667 ice_rebuild(pf, reset_type);
668 clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
669 clear_bit(ICE_PFR_REQ, pf->state);
670 wake_up(&pf->reset_wait_queue);
671 ice_reset_all_vfs(pf);
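
Matches 651-671 show the reset state-bit protocol: the *_REQ bits are consumed, ICE_PREPARED_FOR_RESET is cleared after the rebuild, and wake_up(&pf->reset_wait_queue) releases any waiters. A hedged sketch of a waiter, assuming a helper of roughly this shape exists:

/* Sketch only: block until the reset bits clear, with a timeout so a
 * failed rebuild (ICE_RESET_FAILED, 651) cannot hang the caller. */
static int wait_for_reset(struct ice_pf *pf, unsigned long timeout)
{
        long ret;

        ret = wait_event_interruptible_timeout(pf->reset_wait_queue,
                                               !ice_is_reset_in_progress(pf->state),
                                               timeout);
        if (ret < 0)
                return ret;             /* interrupted by a signal */

        return ret ? 0 : -EBUSY;        /* 0 jiffies left == timed out */
}
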
677 * @pf: board private structure
679 static void ice_reset_subtask(struct ice_pf *pf)
685 * of reset is pending and sets bits in pf->state indicating the reset
693 if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
695 if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
697 if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
699 if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
704 ice_prepare_for_reset(pf, reset_type);
707 if (ice_check_reset(&pf->hw)) {
708 set_bit(ICE_RESET_FAILED, pf->state);
711 pf->hw.reset_ongoing = false;
712 ice_rebuild(pf, reset_type);
716 clear_bit(ICE_RESET_OICR_RECV, pf->state);
717 clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
718 clear_bit(ICE_PFR_REQ, pf->state);
719 clear_bit(ICE_CORER_REQ, pf->state);
720 clear_bit(ICE_GLOBR_REQ, pf->state);
721 wake_up(&pf->reset_wait_queue);
722 ice_reset_all_vfs(pf);
729 if (test_bit(ICE_PFR_REQ, pf->state)) {
731 if (pf->lag && pf->lag->bonded) {
732 dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
736 if (test_bit(ICE_CORER_REQ, pf->state))
738 if (test_bit(ICE_GLOBR_REQ, pf->state))
745 if (!test_bit(ICE_DOWN, pf->state) &&
746 !test_bit(ICE_CFG_BUSY, pf->state)) {
747 ice_do_reset(pf, reset_type);
940 * @pf: private PF struct
949 static void ice_set_dflt_mib(struct ice_pf *pf)
951 struct device *dev = ice_pf_to_dev(pf);
955 struct ice_hw *hw = &pf->hw;
1028 * @pf: pointer to PF struct
1033 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
1036 clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1040 if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
1044 dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
1045 set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1051 * @pf: pointer to PF struct
1057 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
1062 clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1069 if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1073 dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1074 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1076 dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1077 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1083 * @pf: pointer to the PF struct
1089 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
1091 ice_check_module_power(pf, link_cfg_err);
1092 ice_check_phy_fw_load(pf, link_cfg_err);
1097 * @pf: PF that the link event is associated with
1105 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1108 struct device *dev = ice_pf_to_dev(pf);
1130 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1138 vsi = ice_get_main_vsi(pf);
1143 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
1145 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1153 ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
1155 if (ice_is_dcb_active(pf)) {
1156 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1157 ice_dcb_rebuild(pf);
1160 ice_set_dflt_mib(pf);
1165 ice_vc_notify_link_state(pf);
1172 * @pf: board private structure
1174 static void ice_watchdog_subtask(struct ice_pf *pf)
1179 if (test_bit(ICE_DOWN, pf->state) ||
1180 test_bit(ICE_CFG_BUSY, pf->state))
1185 pf->serv_tmr_prev + pf->serv_tmr_period))
1188 pf->serv_tmr_prev = jiffies;
1193 ice_update_pf_stats(pf);
1194 ice_for_each_vsi(pf, i)
1195 if (pf->vsi[i] && pf->vsi[i]->netdev)
1196 ice_update_vsi_stats(pf->vsi[i]);
1230 * @pf: PF that the link event is associated with
1234 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1241 port_info = pf->hw.port_info;
1245 status = ice_link_event(pf, port_info,
1249 dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1257 * @pf: PF that the FW log event is associated with
1261 ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
1264 struct ice_hw *hw = &pf->hw;
1283 * @pf: pointer to the PF private structure
1297 void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1304 spin_lock_bh(&pf->aq_wait_lock);
1305 hlist_add_head(&task->entry, &pf->aq_wait_list);
1306 spin_unlock_bh(&pf->aq_wait_lock);
1311 * @pf: pointer to the PF private structure
1321 int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1325 struct device *dev = ice_pf_to_dev(pf);
1330 ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
1358 spin_lock_bh(&pf->aq_wait_lock);
1360 spin_unlock_bh(&pf->aq_wait_lock);
1367 * @pf: pointer to the PF private structure
1383 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1390 spin_lock_bh(&pf->aq_wait_lock);
1391 hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1411 spin_unlock_bh(&pf->aq_wait_lock);
1414 wake_up(&pf->aq_wait_queue);
1419 * @pf: the PF private structure
1424 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1428 spin_lock_bh(&pf->aq_wait_lock);
1429 hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1431 spin_unlock_bh(&pf->aq_wait_lock);
1433 wake_up(&pf->aq_wait_queue);
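
Matches 1297-1433 implement an opcode-keyed wait list for AdminQ completions. A hedged caller-side sketch of the intended prep/send/wait order; ice_send_cmd() is a hypothetical stand-in for the actual AQ command:

struct ice_aq_task task;
int err;

/* register interest *before* sending so the completion cannot race
 * past the waiter (ice_aq_prep_for_event, 1297) */
ice_aq_prep_for_event(pf, &task, opcode);

err = ice_send_cmd(pf);                 /* hypothetical AQ send */
if (!err)
        err = ice_aq_wait_for_event(pf, &task, HZ);     /* 1321 */
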
1440 * @pf: ptr to struct ice_pf
1443 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1445 struct device *dev = ice_pf_to_dev(pf);
1447 struct ice_hw *hw = &pf->hw;
1454 if (test_bit(ICE_RESET_FAILED, pf->state))
1544 ice_aq_check_events(pf, opcode, &event);
1548 if (ice_handle_link_event(pf, &event))
1552 ice_vf_lan_overflow_event(pf, &event);
1560 ice_vc_process_vf_msg(pf, &event, &data);
1563 ice_get_fwlog_data(pf, &event);
1566 ice_dcb_process_lldp_set_mib_change(pf, &event);
1597 * @pf: board private structure
1599 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1601 struct ice_hw *hw = &pf->hw;
1603 if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1606 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1609 clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1617 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1624 * @pf: board private structure
1626 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1628 struct ice_hw *hw = &pf->hw;
1630 if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1633 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1636 clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1639 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1646 * @pf: board private structure
1648 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1650 struct ice_hw *hw = &pf->hw;
1656 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1660 if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1663 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1666 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1669 __ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1676 * @pf: board private structure
1680 void ice_service_task_schedule(struct ice_pf *pf)
1682 if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1683 !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1684 !test_bit(ICE_NEEDS_RESTART, pf->state))
1685 queue_work(ice_wq, &pf->serv_task);
1690 * @pf: board private structure
1692 static void ice_service_task_complete(struct ice_pf *pf)
1694 WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1696 /* force memory (pf->state) to sync before next service task */
1698 clear_bit(ICE_SERVICE_SCHED, pf->state);
1703 * @pf: board private structure
1708 static int ice_service_task_stop(struct ice_pf *pf)
1712 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1714 if (pf->serv_tmr.function)
1715 del_timer_sync(&pf->serv_tmr);
1716 if (pf->serv_task.func)
1717 cancel_work_sync(&pf->serv_task);
1719 clear_bit(ICE_SERVICE_SCHED, pf->state);
1725 * @pf: board private structure
1729 static void ice_service_task_restart(struct ice_pf *pf)
1731 clear_bit(ICE_SERVICE_DIS, pf->state);
1732 ice_service_task_schedule(pf);
1741 struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1743 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1744 ice_service_task_schedule(pf);
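
Matches 1680-1744 form a schedule/complete handshake in which the single ICE_SERVICE_SCHED bit arbitrates between the timer, interrupt paths, and the work item itself. A condensed sketch of both sides; the real schedule also checks ICE_SERVICE_DIS and ICE_NEEDS_RESTART (1682-1684), and the barrier is inferred from the "force memory to sync" comment at 1696:

/* producer: queue the service task at most once */
if (!test_and_set_bit(ICE_SERVICE_SCHED, pf->state))
        queue_work(ice_wq, &pf->serv_task);

/* consumer, at the end of the work function: publish pf->state
 * updates before permitting the next schedule */
smp_mb__before_atomic();
clear_bit(ICE_SERVICE_SCHED, pf->state);
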
1749 * @pf: pointer to the PF structure
1757 static void ice_handle_mdd_event(struct ice_pf *pf)
1759 struct device *dev = ice_pf_to_dev(pf);
1760 struct ice_hw *hw = &pf->hw;
1765 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1769 ice_print_vfs_mdd_events(pf);
1781 if (netif_msg_tx_err(pf))
1794 if (netif_msg_tx_err(pf))
1807 if (netif_msg_rx_err(pf))
1817 if (netif_msg_tx_err(pf))
1824 if (netif_msg_tx_err(pf))
1831 if (netif_msg_rx_err(pf))
1838 mutex_lock(&pf->vfs.table_lock);
1839 ice_for_each_vf(pf, bkt, vf) {
1844 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1845 if (netif_msg_tx_err(pf))
1854 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1855 if (netif_msg_tx_err(pf))
1864 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1865 if (netif_msg_tx_err(pf))
1874 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1875 if (netif_msg_rx_err(pf))
1883 if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1892 mutex_unlock(&pf->vfs.table_lock);
1894 ice_print_vfs_mdd_events(pf);
1982 struct ice_pf *pf = pi->hw->back;
1993 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1997 pf->nvm_phy_type_hi = pcaps->phy_type_high;
1998 pf->nvm_phy_type_lo = pcaps->phy_type_low;
2014 struct ice_pf *pf = pi->hw->back;
2016 ldo = &pf->link_dflt_override;
2026 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2027 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2052 struct ice_pf *pf = pi->hw->back;
2054 ldo = &pf->link_dflt_override;
2062 cfg->phy_type_low = pf->nvm_phy_type_lo &
2064 cfg->phy_type_high = pf->nvm_phy_type_hi &
2070 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2091 struct ice_pf *pf = pi->hw->back;
2108 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2118 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2125 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2140 set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2161 struct ice_pf *pf = vsi->back;
2170 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2174 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2260 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2273 * @pf: pointer to PF struct
2278 static void ice_check_media_subtask(struct ice_pf *pf)
2285 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2288 vsi = ice_get_main_vsi(pf);
2298 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2301 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2313 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2327 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2333 ice_reset_subtask(pf);
2336 if (ice_is_reset_in_progress(pf->state) ||
2337 test_bit(ICE_SUSPENDED, pf->state) ||
2338 test_bit(ICE_NEEDS_RESTART, pf->state)) {
2339 ice_service_task_complete(pf);
2343 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2350 swap(event->reg, pf->oicr_err_reg);
2351 ice_send_event_to_aux(pf, event);
2359 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2360 ice_unplug_aux_dev(pf);
2363 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2364 ice_plug_aux_dev(pf);
2366 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2372 ice_send_event_to_aux(pf, event);
2377 ice_clean_adminq_subtask(pf);
2378 ice_check_media_subtask(pf);
2379 ice_check_for_hang_subtask(pf);
2380 ice_sync_fltr_subtask(pf);
2381 ice_handle_mdd_event(pf);
2382 ice_watchdog_subtask(pf);
2384 if (ice_is_safe_mode(pf)) {
2385 ice_service_task_complete(pf);
2389 ice_process_vflr_event(pf);
2390 ice_clean_mailboxq_subtask(pf);
2391 ice_clean_sbq_subtask(pf);
2392 ice_sync_arfs_fltrs(pf);
2393 ice_flush_fdir_ctx(pf);
2396 ice_service_task_complete(pf);
2402 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2403 test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2404 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2405 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2406 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2407 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2408 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2409 mod_timer(&pf->serv_tmr, jiffies);
2434 * @pf: board private structure
2437 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2439 struct device *dev = ice_pf_to_dev(pf);
2442 if (test_bit(ICE_RESET_FAILED, pf->state)) {
2447 if (ice_is_reset_in_progress(pf->state)) {
2454 set_bit(ICE_PFR_REQ, pf->state);
2457 set_bit(ICE_CORER_REQ, pf->state);
2460 set_bit(ICE_GLOBR_REQ, pf->state);
2466 ice_service_task_schedule(pf);
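
Matches 2437-2466 show that resets are requested asynchronously: set a *_REQ bit, kick the service task, and let ice_reset_subtask() (679) perform it. Hedged usage sketch from a recovery path:

int err = ice_schedule_reset(pf, ICE_RESET_PFR);

if (err)        /* rejected: prior reset failed or one is in progress */
        dev_err(ice_pf_to_dev(pf), "PFR request rejected: %d\n", err);
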
2522 struct ice_pf *pf = vsi->back;
2529 dev = ice_pf_to_dev(pf);
2684 struct ice_pf *pf = vsi->back;
2686 .qs_mutex = &pf->avail_q_mutex,
2687 .pf_map = pf->avail_txqs,
2688 .pf_map_size = pf->max_pf_txqs,
2699 dev = ice_pf_to_dev(pf);
2756 if (ice_is_reset_in_progress(pf->state))
2794 mutex_lock(&pf->avail_q_mutex);
2796 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2799 mutex_unlock(&pf->avail_q_mutex);
2815 struct ice_pf *pf = vsi->back;
2820 * in pf->state won't be set, so additionally check first q_vector
2823 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2839 mutex_lock(&pf->avail_q_mutex);
2841 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2844 mutex_unlock(&pf->avail_q_mutex);
2858 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2864 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
3046 * @pf: board private structure
3048 static void ice_ena_misc_vector(struct ice_pf *pf)
3050 struct ice_hw *hw = &pf->hw;
3078 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3081 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3084 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3095 struct ice_pf *pf = data;
3103 hw = &pf->hw;
3104 tx = &pf->ptp.port.tx;
3117 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3130 struct ice_pf *pf = (struct ice_pf *)data;
3132 struct ice_hw *hw = &pf->hw;
3136 dev = ice_pf_to_dev(pf);
3137 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3138 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3139 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3146 pf->sw_int_count++;
3151 set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3155 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3162 set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3175 pf->corer_count++;
3177 pf->globr_count++;
3179 pf->empr_count++;
3184 * pf->state so that the service task can start a reset/rebuild.
3186 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3188 set_bit(ICE_CORER_RECV, pf->state);
3190 set_bit(ICE_GLOBR_RECV, pf->state);
3192 set_bit(ICE_EMPR_RECV, pf->state);
3200 * ICE_RESET_OICR_RECV in pf->state indicates
3213 if (ice_pf_state_is_nominal(pf) &&
3214 pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
3215 struct ice_ptp_tx *tx = &pf->ptp.port.tx;
3225 } else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
3226 set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3237 if (ice_pf_src_tmr_owned(pf)) {
3239 pf->ptp.ext_ts_irq |= gltsyn_stat &
3244 ice_ptp_extts_event(pf);
3250 pf->oicr_err_reg |= oicr;
3251 set_bit(ICE_AUX_ERR_PENDING, pf->state);
3264 set_bit(ICE_PFR_REQ, pf->state);
3267 ice_service_task_schedule(pf);
3281 struct ice_pf *pf = data;
3284 hw = &pf->hw;
3286 if (ice_is_reset_in_progress(pf->state))
3289 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3293 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3331 * @pf: board private structure
3333 static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
3335 int irq_num = pf->ll_ts_irq.virq;
3338 devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
3340 ice_free_irq(pf, pf->ll_ts_irq);
3345 * @pf: board private structure
3347 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3349 int misc_irq_num = pf->oicr_irq.virq;
3350 struct ice_hw *hw = &pf->hw;
3359 devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3361 ice_free_irq(pf, pf->oicr_irq);
3362 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3363 ice_free_irq_msix_ll_ts(pf);
3401 * @pf: board private structure
3407 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3409 struct device *dev = ice_pf_to_dev(pf);
3410 struct ice_hw *hw = &pf->hw;
3415 if (!pf->int_name[0])
3416 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3419 if (!pf->int_name_ll_ts[0])
3420 snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
3426 if (ice_is_reset_in_progress(pf->state))
3430 irq = ice_alloc_irq(pf, false);
3434 pf->oicr_irq = irq;
3435 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3437 pf->int_name, pf);
3440 pf->int_name, err);
3441 ice_free_irq(pf, pf->oicr_irq);
3446 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3449 irq = ice_alloc_irq(pf, false);
3453 pf->ll_ts_irq = irq;
3454 err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
3455 pf->int_name_ll_ts, pf);
3458 pf->int_name_ll_ts, err);
3459 ice_free_irq(pf, pf->ll_ts_irq);
3464 ice_ena_misc_vector(pf);
3466 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3469 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3471 ((pf->ll_ts_irq.index + pf_intr_start_offset) &
3473 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
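
Matches 3407-3473 request the OICR ("other interrupt cause") vector as a threaded IRQ: ice_misc_intr (3130) runs in hard-IRQ context and defers slow timestamp handling to the threaded half (3281). Hedged sketch mirroring 3435-3441; the thread function name is assumed to be the upstream ice_misc_intr_thread_fn:

err = devm_request_threaded_irq(dev, pf->oicr_irq.virq,
                                ice_misc_intr,           /* hard handler */
                                ice_misc_intr_thread_fn, /* threaded half */
                                0, pf->int_name, pf);
if (err) {
        dev_err(dev, "misc IRQ request failed: %d\n", err);
        ice_free_irq(pf, pf->oicr_irq);
}
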
3511 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3513 if (ice_is_safe_mode(pf)) {
3520 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3539 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3540 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3546 if (ice_is_safe_mode(pf)) {
3639 * @pf: board private structure
3646 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3654 return ice_vsi_setup(pf, &params);
3658 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3668 return ice_vsi_setup(pf, &params);
3673 * @pf: board private structure
3680 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3688 return ice_vsi_setup(pf, &params);
3693 * @pf: board private structure
3700 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3708 return ice_vsi_setup(pf, &params);
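
Matches 3646-3708 are four thin wrappers (PF, channel, control, loopback VSI) that all funnel into ice_vsi_setup() with a stack-local params struct. Hedged reconstruction of one wrapper; the struct and field names are assumptions modeled on the upstream ice_vsi_cfg_params:

static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
        struct ice_vsi_cfg_params params = {};

        params.type = ICE_VSI_PF;       /* assumed field names */
        params.pi = pi;
        params.flags = ICE_VSI_FLAG_INIT;

        return ice_vsi_setup(pf, &params);
}
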
3908 * @pf: pointer to an ice_pf instance
3910 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3912 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3913 pf->max_pf_txqs);
3918 * @pf: pointer to an ice_pf instance
3920 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3922 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3923 pf->max_pf_rxqs);
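
Matches 3910-3923 delegate to one helper over the queue-ownership bitmaps guarded by avail_q_mutex. A minimal equivalent, assuming a clear bit means a free queue (consistent with the clear_bit() releases at 2796/2841):

static u16 get_avail_q_count(unsigned long *qmap, struct mutex *lock, u16 size)
{
        unsigned long bit;
        u16 count = 0;

        mutex_lock(lock);
        for_each_clear_bit(bit, qmap, size)     /* clear bit == free queue */
                count++;
        mutex_unlock(lock);

        return count;
}
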
3928 * @pf: board private structure to initialize
3930 static void ice_deinit_pf(struct ice_pf *pf)
3932 ice_service_task_stop(pf);
3933 mutex_destroy(&pf->lag_mutex);
3934 mutex_destroy(&pf->adev_mutex);
3935 mutex_destroy(&pf->sw_mutex);
3936 mutex_destroy(&pf->tc_mutex);
3937 mutex_destroy(&pf->avail_q_mutex);
3938 mutex_destroy(&pf->vfs.table_lock);
3940 if (pf->avail_txqs) {
3941 bitmap_free(pf->avail_txqs);
3942 pf->avail_txqs = NULL;
3945 if (pf->avail_rxqs) {
3946 bitmap_free(pf->avail_rxqs);
3947 pf->avail_rxqs = NULL;
3950 if (pf->ptp.clock)
3951 ptp_clock_unregister(pf->ptp.clock);
3956 * @pf: pointer to the PF instance
3958 static void ice_set_pf_caps(struct ice_pf *pf)
3960 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3962 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3964 set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3965 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3967 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3968 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3970 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3971 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3974 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3976 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3978 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3985 pf->ctrl_vsi_idx = ICE_NO_VSI;
3986 set_bit(ICE_FLAG_FD_ENA, pf->flags);
3988 ice_alloc_fd_guar_item(&pf->hw, &unused,
3991 ice_alloc_fd_shrd_item(&pf->hw, &unused,
3995 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3997 !(pf->hw.mac_type == ICE_MAC_E830))
3998 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4000 pf->max_pf_txqs = func_caps->common_cap.num_txq;
4001 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
4006 * @pf: board private structure to initialize
4008 static int ice_init_pf(struct ice_pf *pf)
4010 ice_set_pf_caps(pf);
4012 mutex_init(&pf->sw_mutex);
4013 mutex_init(&pf->tc_mutex);
4014 mutex_init(&pf->adev_mutex);
4015 mutex_init(&pf->lag_mutex);
4017 INIT_HLIST_HEAD(&pf->aq_wait_list);
4018 spin_lock_init(&pf->aq_wait_lock);
4019 init_waitqueue_head(&pf->aq_wait_queue);
4021 init_waitqueue_head(&pf->reset_wait_queue);
4024 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
4025 pf->serv_tmr_period = HZ;
4026 INIT_WORK(&pf->serv_task, ice_service_task);
4027 clear_bit(ICE_SERVICE_SCHED, pf->state);
4029 mutex_init(&pf->avail_q_mutex);
4030 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
4031 if (!pf->avail_txqs)
4034 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
4035 if (!pf->avail_rxqs) {
4036 bitmap_free(pf->avail_txqs);
4037 pf->avail_txqs = NULL;
4041 mutex_init(&pf->vfs.table_lock);
4042 hash_init(pf->vfs.table);
4043 ice_mbx_init_snapshot(&pf->hw);
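
Matches 4030-4037 elide the error paths between the two bitmap allocations; the full idiom, reconstructed from the fragments (the -ENOMEM returns are assumed, as the error lines were not matched):

pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
if (!pf->avail_txqs)
        return -ENOMEM;

pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
if (!pf->avail_rxqs) {
        /* unwind the first allocation before failing */
        bitmap_free(pf->avail_txqs);
        pf->avail_txqs = NULL;
        return -ENOMEM;
}
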
4081 struct ice_pf *pf = vsi->back;
4087 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4102 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4108 ice_pf_dcb_recfg(pf, locked);
4111 clear_bit(ICE_CFG_BUSY, pf->state);
4117 * @pf: PF to configure
4122 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4124 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4136 hw = &pf->hw;
4175 struct ice_pf *pf = hw->back;
4178 dev = ice_pf_to_dev(pf);
4252 * @pf: pointer to the PF instance
4258 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4261 struct device *dev = ice_pf_to_dev(pf);
4262 struct ice_hw *hw = &pf->hw;
4279 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4286 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4291 * @pf: pointer to the PF structure
4297 static void ice_verify_cacheline_size(struct ice_pf *pf)
4299 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4300 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4306 * @pf: PF struct
4310 static int ice_send_version(struct ice_pf *pf)
4320 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4325 * @pf: pointer to the PF instance
4329 static int ice_init_fdir(struct ice_pf *pf)
4331 struct device *dev = ice_pf_to_dev(pf);
4338 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4350 mutex_init(&pf->hw.fdir_fltr_lock);
4352 err = ice_fdir_create_dflt_rules(pf);
4359 ice_fdir_release_flows(&pf->hw);
4363 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4364 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4365 pf->ctrl_vsi_idx = ICE_NO_VSI;
4370 static void ice_deinit_fdir(struct ice_pf *pf)
4372 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4379 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4380 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4381 pf->ctrl_vsi_idx = ICE_NO_VSI;
4384 mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
4389 * @pf: pointer to the PF instance
4391 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4396 struct pci_dev *pdev = pf->pdev;
4419 * @pf: pointer to the PF instance
4421 static void ice_request_fw(struct ice_pf *pf)
4423 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4425 struct device *dev = ice_pf_to_dev(pf);
4440 ice_load_pkg(firmware, pf);
4454 ice_load_pkg(firmware, pf);
4460 * @pf: pointer to the PF struct
4462 static void ice_print_wake_reason(struct ice_pf *pf)
4464 u32 wus = pf->wakeup_reason;
4482 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4487 * @pf: pointer to the PF struct
4491 void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
4493 struct ice_hw *hw = &pf->hw;
4602 int ice_init_dev(struct ice_pf *pf)
4604 struct device *dev = ice_pf_to_dev(pf);
4605 struct ice_hw *hw = &pf->hw;
4626 ice_init_feature_support(pf);
4628 ice_request_fw(pf);
4631 * set in pf->state, which will cause ice_is_safe_mode to return
4634 if (ice_is_safe_mode(pf)) {
4643 err = ice_init_pf(pf);
4649 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4650 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4651 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4652 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4653 if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4654 pf->hw.udp_tunnel_nic.tables[0].n_entries =
4655 pf->hw.tnl.valid_count[TNL_VXLAN];
4656 pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4659 if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4660 pf->hw.udp_tunnel_nic.tables[1].n_entries =
4661 pf->hw.tnl.valid_count[TNL_GENEVE];
4662 pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4666 err = ice_init_interrupt_scheme(pf);
4678 err = ice_req_irq_msix_misc(pf);
4687 ice_clear_interrupt_scheme(pf);
4689 ice_deinit_pf(pf);
4695 void ice_deinit_dev(struct ice_pf *pf)
4697 ice_free_irq_msix_misc(pf);
4698 ice_deinit_pf(pf);
4699 ice_deinit_hw(&pf->hw);
4702 ice_reset(&pf->hw, ICE_RESET_PFR);
4703 pci_wait_for_pending_transaction(pf->pdev);
4704 ice_clear_interrupt_scheme(pf);
4707 static void ice_init_features(struct ice_pf *pf)
4709 struct device *dev = ice_pf_to_dev(pf);
4711 if (ice_is_safe_mode(pf))
4715 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4716 ice_ptp_init(pf);
4718 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4719 ice_gnss_init(pf);
4721 if (ice_is_feature_supported(pf, ICE_F_CGU) ||
4722 ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
4723 ice_dpll_init(pf);
4726 if (ice_init_fdir(pf))
4730 if (ice_init_pf_dcb(pf, false)) {
4731 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4732 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4734 ice_cfg_lldp_mib_change(&pf->hw, true);
4737 if (ice_init_lag(pf))
4740 ice_hwmon_init(pf);
4743 static void ice_deinit_features(struct ice_pf *pf)
4745 if (ice_is_safe_mode(pf))
4748 ice_deinit_lag(pf);
4749 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4750 ice_cfg_lldp_mib_change(&pf->hw, false);
4751 ice_deinit_fdir(pf);
4752 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4753 ice_gnss_exit(pf);
4754 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4755 ice_ptp_release(pf);
4756 if (test_bit(ICE_FLAG_DPLL, pf->flags))
4757 ice_dpll_deinit(pf);
4758 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
4759 xa_destroy(&pf->eswitch.reprs);
4762 static void ice_init_wakeup(struct ice_pf *pf)
4765 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4768 ice_print_wake_reason(pf);
4771 wr32(&pf->hw, PFPM_WUS, U32_MAX);
4774 device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4777 static int ice_init_link(struct ice_pf *pf)
4779 struct device *dev = ice_pf_to_dev(pf);
4782 err = ice_init_link_events(pf->hw.port_info);
4789 err = ice_init_nvm_phy_type(pf->hw.port_info);
4794 err = ice_update_link_info(pf->hw.port_info);
4798 ice_init_link_dflt_override(pf->hw.port_info);
4800 ice_check_link_cfg_err(pf,
4801 pf->hw.port_info->phy.link_info.link_cfg_err);
4804 if (pf->hw.port_info->phy.link_info.link_info &
4807 err = ice_init_phy_user_cfg(pf->hw.port_info);
4811 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4812 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4818 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4824 static int ice_init_pf_sw(struct ice_pf *pf)
4826 bool dvm = ice_is_dvm_ena(&pf->hw);
4831 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4832 if (!pf->first_sw)
4835 if (pf->hw.evb_veb)
4836 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4838 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4840 pf->first_sw->pf = pf;
4843 pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4845 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4849 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4859 kfree(pf->first_sw);
4863 static void ice_deinit_pf_sw(struct ice_pf *pf)
4865 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4871 kfree(pf->first_sw);
4874 static int ice_alloc_vsis(struct ice_pf *pf)
4876 struct device *dev = ice_pf_to_dev(pf);
4878 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
4879 if (!pf->num_alloc_vsi)
4882 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4885 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4886 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4889 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4891 if (!pf->vsi)
4894 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
4895 sizeof(*pf->vsi_stats), GFP_KERNEL);
4896 if (!pf->vsi_stats) {
4897 devm_kfree(dev, pf->vsi);
4904 static void ice_dealloc_vsis(struct ice_pf *pf)
4906 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
4907 pf->vsi_stats = NULL;
4909 pf->num_alloc_vsi = 0;
4910 devm_kfree(ice_pf_to_dev(pf), pf->vsi);
4911 pf->vsi = NULL;
4914 static int ice_init_devlink(struct ice_pf *pf)
4918 err = ice_devlink_register_params(pf);
4922 ice_devlink_init_regions(pf);
4923 ice_devlink_register(pf);
4928 static void ice_deinit_devlink(struct ice_pf *pf)
4930 ice_devlink_unregister(pf);
4931 ice_devlink_destroy_regions(pf);
4932 ice_devlink_unregister_params(pf);
4935 static int ice_init(struct ice_pf *pf)
4939 err = ice_init_dev(pf);
4943 err = ice_alloc_vsis(pf);
4947 err = ice_init_pf_sw(pf);
4951 ice_init_wakeup(pf);
4953 err = ice_init_link(pf);
4957 err = ice_send_version(pf);
4961 ice_verify_cacheline_size(pf);
4963 if (ice_is_safe_mode(pf))
4964 ice_set_safe_mode_vlan_cfg(pf);
4967 pcie_print_link_status(pf->pdev);
4970 clear_bit(ICE_DOWN, pf->state);
4971 clear_bit(ICE_SERVICE_DIS, pf->state);
4974 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4979 ice_deinit_pf_sw(pf);
4981 ice_dealloc_vsis(pf);
4983 ice_deinit_dev(pf);
4987 static void ice_deinit(struct ice_pf *pf)
4989 set_bit(ICE_SERVICE_DIS, pf->state);
4990 set_bit(ICE_DOWN, pf->state);
4992 ice_deinit_pf_sw(pf);
4993 ice_dealloc_vsis(pf);
4994 ice_deinit_dev(pf);
4998 * ice_load - load pf by init hw and starting VSI
4999 * @pf: pointer to the pf instance
5003 int ice_load(struct ice_pf *pf)
5008 devl_assert_locked(priv_to_devlink(pf));
5010 vsi = ice_get_main_vsi(pf);
5022 err = ice_init_mac_fltr(pf);
5026 err = ice_devlink_create_pf_port(pf);
5030 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
5042 err = ice_init_rdma(pf);
5046 ice_init_features(pf);
5047 ice_service_task_restart(pf);
5049 clear_bit(ICE_DOWN, pf->state);
5058 ice_devlink_destroy_pf_port(pf);
5066 * ice_unload - unload pf by stopping VSI and deinit hw
5067 * @pf: pointer to the pf instance
5071 void ice_unload(struct ice_pf *pf)
5073 struct ice_vsi *vsi = ice_get_main_vsi(pf);
5075 devl_assert_locked(priv_to_devlink(pf));
5077 ice_deinit_features(pf);
5078 ice_deinit_rdma(pf);
5081 ice_devlink_destroy_pf_port(pf);
5096 struct ice_pf *pf;
5132 pf = ice_allocate_pf(dev);
5133 if (!pf)
5137 pf->aux_idx = -1;
5148 pf->pdev = pdev;
5149 pci_set_drvdata(pdev, pf);
5150 set_bit(ICE_DOWN, pf->state);
5152 set_bit(ICE_SERVICE_DIS, pf->state);
5154 hw = &pf->hw;
5158 hw->back = pf;
5169 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5176 err = ice_init(pf);
5180 devl_lock(priv_to_devlink(pf));
5181 err = ice_load(pf);
5182 devl_unlock(priv_to_devlink(pf));
5186 err = ice_init_devlink(pf);
5193 devl_lock(priv_to_devlink(pf));
5194 ice_unload(pf);
5195 devl_unlock(priv_to_devlink(pf));
5197 ice_deinit(pf);
5205 * @pf: pointer to the PF struct
5209 static void ice_set_wake(struct ice_pf *pf)
5211 struct ice_hw *hw = &pf->hw;
5212 bool wol = pf->wol_ena;
5226 * @pf: pointer to the PF struct
5232 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5234 struct device *dev = ice_pf_to_dev(pf);
5235 struct ice_hw *hw = &pf->hw;
5241 if (!pf->wol_ena)
5244 vsi = ice_get_main_vsi(pf);
5270 struct ice_pf *pf = pci_get_drvdata(pdev);
5274 if (!ice_is_reset_in_progress(pf->state))
5279 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5280 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5281 ice_free_vfs(pf);
5284 ice_hwmon_exit(pf);
5286 ice_service_task_stop(pf);
5287 ice_aq_cancel_waiting_tasks(pf);
5288 set_bit(ICE_DOWN, pf->state);
5290 if (!ice_is_safe_mode(pf))
5291 ice_remove_arfs(pf);
5293 ice_deinit_devlink(pf);
5295 devl_lock(priv_to_devlink(pf));
5296 ice_unload(pf);
5297 devl_unlock(priv_to_devlink(pf));
5299 ice_deinit(pf);
5300 ice_vsi_release_all(pf);
5302 ice_setup_mc_magic_wake(pf);
5303 ice_set_wake(pf);
5314 struct ice_pf *pf = pci_get_drvdata(pdev);
5319 pci_wake_from_d3(pdev, pf->wol_ena);
5327 * @pf: board private structure
5331 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5333 struct ice_hw *hw = &pf->hw;
5338 ice_vc_notify_reset(pf);
5340 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5343 ice_pf_dis_all_vsi(pf, false);
5345 ice_for_each_vsi(pf, v)
5346 if (pf->vsi[v])
5347 pf->vsi[v]->vsi_num = 0;
5354 * @pf: board private structure to reinitialize
5362 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5364 struct device *dev = ice_pf_to_dev(pf);
5371 ret = ice_init_interrupt_scheme(pf);
5378 ice_for_each_vsi(pf, v) {
5379 if (!pf->vsi[v])
5382 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5385 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5386 ice_vsi_set_napi_queues(pf->vsi[v]);
5389 ret = ice_req_irq_msix_misc(pf);
5400 if (pf->vsi[v])
5401 ice_vsi_free_q_vectors(pf->vsi[v]);
5416 struct ice_pf *pf;
5419 pf = pci_get_drvdata(pdev);
5421 if (!ice_pf_state_is_nominal(pf)) {
5432 disabled = ice_service_task_stop(pf);
5434 ice_unplug_aux_dev(pf);
5437 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5439 ice_service_task_restart(pf);
5443 if (test_bit(ICE_DOWN, pf->state) ||
5444 ice_is_reset_in_progress(pf->state)) {
5447 ice_service_task_restart(pf);
5451 ice_setup_mc_magic_wake(pf);
5453 ice_prepare_for_shutdown(pf);
5455 ice_set_wake(pf);
5462 ice_free_irq_msix_misc(pf);
5463 ice_for_each_vsi(pf, v) {
5464 if (!pf->vsi[v])
5466 ice_vsi_free_q_vectors(pf->vsi[v]);
5468 ice_clear_interrupt_scheme(pf);
5471 pci_wake_from_d3(pdev, pf->wol_ena);
5484 struct ice_pf *pf;
5501 pf = pci_get_drvdata(pdev);
5502 hw = &pf->hw;
5504 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5505 ice_print_wake_reason(pf);
5510 ret = ice_reinit_interrupt_scheme(pf);
5514 clear_bit(ICE_DOWN, pf->state);
5518 clear_bit(ICE_SERVICE_DIS, pf->state);
5520 if (ice_schedule_reset(pf, reset_type))
5523 clear_bit(ICE_SUSPENDED, pf->state);
5524 ice_service_task_restart(pf);
5527 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5544 struct ice_pf *pf = pci_get_drvdata(pdev);
5546 if (!pf) {
5552 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5553 ice_service_task_stop(pf);
5555 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5556 set_bit(ICE_PFR_REQ, pf->state);
5557 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5573 struct ice_pf *pf = pci_get_drvdata(pdev);
5590 reg = rd32(&pf->hw, GLGEN_RTRIG);
5609 struct ice_pf *pf = pci_get_drvdata(pdev);
5611 if (!pf) {
5617 if (test_bit(ICE_SUSPENDED, pf->state)) {
5623 ice_restore_all_vfs_msi_state(pf);
5625 ice_do_reset(pf, ICE_RESET_PFR);
5626 ice_service_task_restart(pf);
5627 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5636 struct ice_pf *pf = pci_get_drvdata(pdev);
5638 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5639 ice_service_task_stop(pf);
5641 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5642 set_bit(ICE_PFR_REQ, pf->state);
5643 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5803 struct ice_pf *pf = vsi->back;
5804 struct ice_hw *hw = &pf->hw;
5816 if (test_bit(ICE_DOWN, pf->state) ||
5817 ice_is_reset_in_progress(pf->state)) {
5823 if (ice_chnl_dmac_fltr_cnt(pf)) {
6300 struct ice_pf *pf = vsi->back;
6304 if (ice_is_safe_mode(pf)) {
6305 dev_err(ice_pf_to_dev(pf),
6311 if (ice_is_reset_in_progress(pf->state)) {
6312 dev_err(ice_pf_to_dev(pf),
6352 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6353 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6360 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6361 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6566 struct ice_pf *pf = vsi->back;
6589 ice_ptp_link_change(pf, pf->hw.pf_id, true);
6598 ice_service_task_schedule(pf);
6681 struct ice_pf *pf = vsi->back;
6732 if (likely(pf->stat_prev_loaded)) {
6755 struct ice_pf *pf = vsi->back;
6758 test_bit(ICE_CFG_BUSY, pf->state))
6774 cur_ns->rx_crc_errors = pf->stats.crc_errors;
6775 cur_ns->rx_errors = pf->stats.crc_errors +
6776 pf->stats.illegal_bytes +
6777 pf->stats.rx_undersize +
6778 pf->hw_csum_rx_error +
6779 pf->stats.rx_jabber +
6780 pf->stats.rx_fragments +
6781 pf->stats.rx_oversize;
6783 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6789 * @pf: PF whose stats needs to be updated
6791 void ice_update_pf_stats(struct ice_pf *pf)
6794 struct ice_hw *hw = &pf->hw;
6799 prev_ps = &pf->stats_prev;
6800 cur_ps = &pf->stats;
6802 if (ice_is_reset_in_progress(pf->state))
6803 pf->stat_prev_loaded = false;
6805 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6809 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6813 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6817 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6821 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6825 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6829 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6833 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6837 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6841 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6845 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6848 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6851 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6854 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6857 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6860 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6863 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6866 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6869 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6872 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6875 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6878 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6881 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6884 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6891 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6893 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6896 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6899 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6902 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6905 ice_update_dcb_stats(pf);
6907 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6910 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6913 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6917 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6921 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6924 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6927 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6930 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6933 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6935 pf->stat_prev_loaded = true;
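
Matches 6805-6930 all share one pattern: the hardware counters are free-running and narrower than 64 bits, so each ice_stat_update40/32 call diffs the current read against a previous snapshot, handles wraparound, and only accumulates once stat_prev_loaded is true (6802-6803 reset that flag across resets). A hedged sketch of the 40-bit variant's core logic; the actual helper lives in ice_common.c upstream:

static void stat_update40(u64 new_data, bool prev_loaded,
                          u64 *prev_stat, u64 *cur_stat)
{
        if (prev_loaded) {
                if (new_data >= *prev_stat)
                        *cur_stat += new_data - *prev_stat;
                else
                        /* the counter wrapped its 40-bit range */
                        *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
        }
        *prev_stat = new_data;          /* snapshot for the next pass */
}
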
7008 struct ice_pf *pf = vsi->back;
7009 struct ice_hw *hw = &pf->hw;
7206 struct ice_pf *pf = vsi->back;
7210 dev = ice_pf_to_dev(pf);
7262 struct ice_pf *pf = vsi->back;
7279 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7317 * @pf: PF from which all VSIs are being removed
7319 static void ice_vsi_release_all(struct ice_pf *pf)
7323 if (!pf->vsi)
7326 ice_for_each_vsi(pf, i) {
7327 if (!pf->vsi[i])
7330 if (pf->vsi[i]->type == ICE_VSI_CHNL)
7333 err = ice_vsi_release(pf->vsi[i]);
7335 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7336 i, err, pf->vsi[i]->vsi_num);
7342 * @pf: pointer to the PF instance
7345 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7347 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7349 struct device *dev = ice_pf_to_dev(pf);
7352 ice_for_each_vsi(pf, i) {
7353 struct ice_vsi *vsi = pf->vsi[i];
7367 err = ice_replay_vsi(&pf->hw, vsi->idx);
7377 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7396 * @pf: pointer to the PF instance
7398 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7403 ice_for_each_vsi(pf, i) {
7404 struct ice_vsi *vsi = pf->vsi[i];
7409 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7411 netif_carrier_on(pf->vsi[i]->netdev);
7412 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7414 netif_carrier_off(pf->vsi[i]->netdev);
7415 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7422 * @pf: PF to rebuild
7430 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7432 struct device *dev = ice_pf_to_dev(pf);
7433 struct ice_hw *hw = &pf->hw;
7437 if (test_bit(ICE_DOWN, pf->state))
7448 pf->fw_emp_reset_disabled = false;
7460 if (!ice_is_safe_mode(pf)) {
7466 ice_load_pkg(NULL, pf);
7497 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7506 err = ice_req_irq_msix_misc(pf);
7512 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7527 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7528 ice_dcb_rebuild(pf);
7534 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7535 ice_ptp_rebuild(pf, reset_type);
7537 if (ice_is_feature_supported(pf, ICE_F_GNSS))
7538 ice_gnss_init(pf);
7541 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7547 err = ice_eswitch_rebuild(pf);
7554 err = ice_rebuild_channels(pf);
7563 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7564 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7575 ice_fdir_replay_fltrs(pf);
7577 ice_rebuild_arfs(pf);
7580 ice_update_pf_netdev_link(pf);
7583 err = ice_send_version(pf);
7593 clear_bit(ICE_RESET_FAILED, pf->state);
7595 ice_plug_aux_dev(pf);
7596 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7597 ice_lag_rebuild(pf);
7600 ice_ptp_restore_timestamp_mode(pf);
7608 set_bit(ICE_RESET_FAILED, pf->state);
7611 set_bit(ICE_NEEDS_RESTART, pf->state);
7626 struct ice_pf *pf = vsi->back;
7645 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7655 if (ice_is_reset_in_progress(pf->state)) {
7675 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7689 struct ice_pf *pf = np->vsi->back;
7693 return ice_ptp_get_ts_config(pf, ifr);
7695 return ice_ptp_set_ts_config(pf, ifr);
7919 struct ice_pf *pf = vsi->back;
7922 bmode = pf->first_sw->bridge_mode;
7990 struct ice_pf *pf = np->vsi->back;
7992 struct ice_hw *hw = &pf->hw;
7996 pf_sw = pf->first_sw;
8016 ice_for_each_vsi(pf, v) {
8017 if (!pf->vsi[v])
8019 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
8054 struct ice_pf *pf = vsi->back;
8057 pf->tx_timeout_count++;
8063 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
8064 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
8080 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
8081 pf->tx_timeout_recovery_level = 1;
8082 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
8087 struct ice_hw *hw = &pf->hw;
8100 pf->tx_timeout_last_recovery = jiffies;
8102 pf->tx_timeout_recovery_level, txqueue);
8104 switch (pf->tx_timeout_recovery_level) {
8106 set_bit(ICE_PFR_REQ, pf->state);
8109 set_bit(ICE_CORER_REQ, pf->state);
8112 set_bit(ICE_GLOBR_REQ, pf->state);
8116 set_bit(ICE_DOWN, pf->state);
8118 set_bit(ICE_SERVICE_DIS, pf->state);
8122 ice_service_task_schedule(pf);
8123 pf->tx_timeout_recovery_level++;
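
Matches 8080-8123 implement escalating Tx-timeout recovery: each timeout inside the 20-second window (8080-8082) requests a progressively larger reset. The ladder, reconstructed from the fragments above (the case labels are assumed, implied by the level-1 restart at 8081):

switch (pf->tx_timeout_recovery_level) {
case 1:
        set_bit(ICE_PFR_REQ, pf->state);        /* PF reset */
        break;
case 2:
        set_bit(ICE_CORER_REQ, pf->state);      /* core reset */
        break;
case 3:
        set_bit(ICE_GLOBR_REQ, pf->state);      /* global reset */
        break;
default:
        /* recovery exhausted: take the device down (8116, 8118) */
        set_bit(ICE_DOWN, pf->state);
        set_bit(ICE_SERVICE_DIS, pf->state);
        break;
}

ice_service_task_schedule(pf);
pf->tx_timeout_recovery_level++;
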
8186 struct ice_pf *pf = vsi->back;
8201 dev = ice_pf_to_dev(pf);
8314 * @pf: ptr to PF device
8317 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8319 struct device *dev = ice_pf_to_dev(pf);
8327 hw = &pf->hw;
8374 * @pf: ptr to PF device
8380 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8382 struct device *dev = ice_pf_to_dev(pf);
8390 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8396 ice_add_vsi_to_fdir(pf, vsi);
8488 * @pf: ptr to PF device
8498 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8501 struct device *dev = ice_pf_to_dev(pf);
8507 ret = ice_add_channel(pf, sw_id, ch);
8528 * @pf: ptr to PF device
8536 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8539 struct device *dev = ice_pf_to_dev(pf);
8548 sw_id = pf->first_sw->sw_id;
8551 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8589 struct ice_pf *pf = vsi->back;
8595 dev = ice_pf_to_dev(pf);
8607 if (!ice_setup_channel(pf, vsi, ch)) {
8632 * @pf: ptr to PF, TC-flower based filter are tracked at PF level
8637 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8644 &pf->tc_flower_fltr_list,
8656 status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8659 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8662 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8672 pf->num_dmac_chnl_fltrs--;
8691 struct ice_pf *pf = vsi->back;
8696 ice_rem_all_chnl_fltrs(pf);
8700 struct ice_hw *hw = &pf->hw;
8738 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8761 * @pf: ptr to PF
8765 static int ice_rebuild_channels(struct ice_pf *pf)
8767 struct device *dev = ice_pf_to_dev(pf);
8775 main_vsi = ice_get_main_vsi(pf);
8779 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8794 ice_for_each_vsi(pf, i) {
8797 vsi = pf->vsi[i];
8814 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8817 err = ice_replay_vsi(&pf->hw, vsi->idx);
8881 struct ice_pf *pf = vsi->back;
8911 dev_err(ice_pf_to_dev(pf),
8918 dev_dbg(ice_pf_to_dev(pf),
8939 struct ice_pf *pf = vsi->back;
8946 dev = ice_pf_to_dev(pf);
8951 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8964 if (pf->hw.port_info->is_custom_tx_enabled) {
8968 ice_tear_down_devlink_rate_tree(pf);
8977 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8983 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8999 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
9002 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9003 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
9005 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
9042 clear_bit(ICE_RESET_FAILED, pf->state);
9058 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9111 struct ice_pf *pf = np->vsi->back;
9122 if (ice_is_eswitch_mode_switchdev(pf)) {
9127 if (pf->adev) {
9128 mutex_lock(&pf->adev_mutex);
9129 device_lock(&pf->adev->dev);
9131 if (pf->adev->dev.driver) {
9139 mutex_lock(&pf->tc_mutex);
9141 mutex_unlock(&pf->tc_mutex);
9145 device_unlock(&pf->adev->dev);
9146 mutex_unlock(&pf->adev_mutex);
9285 struct ice_pf *pf = np->vsi->back;
9287 if (ice_is_reset_in_progress(pf->state)) {
9308 struct ice_pf *pf = vsi->back;
9312 if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
9326 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
9330 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9331 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
9347 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9376 struct ice_pf *pf = vsi->back;
9378 if (ice_is_reset_in_progress(pf->state)) {