Lines Matching defs:vf

38 static void	ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
44 static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
46 static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
47 static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
48 static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
50 static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
51 static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
52 static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
53 static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
54 static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
55 static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
56 static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
57 static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
58 static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
59 static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
60 static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
61 static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
62 static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
65 static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
66 static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
67 static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
68 static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
69 static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
70 static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
71 static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
72 static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
73 static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
74 static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
75 static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
76 static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
125 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
141 vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
153 if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
165 for (i = 0; i < vf->qtag.num_active; i++)
166 vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
172 ((fls(vf->qtag.num_allocated) - 1) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
177 vf->vsi.seid = vsi_ctx.seid;
178 vf->vsi.vsi_num = vsi_ctx.vsi_number;
179 vf->vsi.num_rx_queues = vf->qtag.num_active;
180 vf->vsi.num_tx_queues = vf->qtag.num_active;
186 code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
193 memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
198 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
204 vf->vsi.flags |= IXL_FLAGS_IS_VF;
206 error = ixl_vf_alloc_vsi(pf, vf);
210 vf->vsi.dev = pf->dev;
212 ixl_init_filters(&vf->vsi);
214 error = i40e_aq_set_vsi_broadcast(hw, vf->vsi.seid, TRUE, NULL);
218 ixl_reconfigure_filters(&vf->vsi);
224 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
238 qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
241 i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
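
The ixl_vf_map_vsi_queue() fragments above read and rewrite an I40E_VSILAN_QTABLE register, which suggests that more than one VSI-queue mapping is packed into each 32-bit register and each update is a read-modify-write at an index and shift derived from the queue number. A standalone sketch of that packing follows; the field width, mask, fake register array, and function names are assumptions made for illustration, not the driver's actual register layout.

	#include <stdint.h>
	#include <stdio.h>

	#define QTABLE_FIELD_SHIFT	16u	/* assumed: second mapping starts at bit 16 */
	#define QTABLE_FIELD_MASK	0x7ffu	/* assumed: physical queue index field mask */

	static uint32_t qtable_regs[64];	/* fake register file standing in for the hardware */

	/* Map VSI-relative queue 'qnum' to physical queue 'phys_queue'. */
	static void
	map_vsi_queue(int qnum, uint32_t phys_queue)
	{
		int index = qnum / 2;				/* two mappings per 32-bit register */
		int shift = (qnum % 2) * QTABLE_FIELD_SHIFT;
		uint32_t v = qtable_regs[index];

		v &= ~(QTABLE_FIELD_MASK << shift);		/* clear the old mapping */
		v |= (phys_queue & QTABLE_FIELD_MASK) << shift;	/* install the new one */
		qtable_regs[index] = v;
	}

	int
	main(void)
	{
		map_vsi_queue(0, 96);	/* VSI queue 0 -> physical queue 96 */
		map_vsi_queue(1, 97);	/* VSI queue 1 -> physical queue 97 */
		printf("QTABLE[0] = 0x%08x\n", (unsigned)qtable_regs[0]);
		return (0);
	}
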
245 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
257 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
261 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
266 for (i = 0; i < vf->vsi.num_tx_queues; i++) {
267 qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
270 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
273 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
279 for (i = 0; i < vf->vsi.num_tx_queues; i++)
280 ixl_vf_map_vsi_queue(hw, vf, i,
281 ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
285 ixl_vf_map_vsi_queue(hw, vf, i,
322 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
330 ixl_vf_vsi_release(pf, &vf->vsi);
333 ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
336 vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
341 ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
344 vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
348 vf->vsi.num_tx_queues = 0;
349 vf->vsi.num_rx_queues = 0;
353 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
361 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
376 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
383 ixl_dbg_iov(pf, "Resetting VF-%d\n", vf->vf_num);
385 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
387 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
390 ixl_reinit_vf(pf, vf);
392 ixl_dbg_iov(pf, "Resetting VF-%d done.\n", vf->vf_num);
396 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
404 error = ixl_flush_pcie(pf, vf);
408 vf->vf_num);
413 vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
419 device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
421 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);
423 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
425 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
427 if (vf->vsi.seid != 0)
428 ixl_disable_rings(pf, &vf->vsi, &vf->qtag);
429 ixl_pf_qmgr_clear_queue_flags(&vf->qtag);
431 ixl_vf_release_resources(pf, vf);
432 ixl_vf_setup_vsi(pf, vf);
433 ixl_vf_map_queues(pf, vf);
435 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
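
The ixl_reinit_vf() fragments above flush pending PCIe transactions, then poll I40E_VPGEN_VFRSTAT until the hardware reports the VF reset as complete, printing "VF %d failed to reset" if it never does. Below is a self-contained sketch of that bounded-poll pattern; the completion bit, retry budget, delay helper, and register stand-in are assumptions for the example, not the driver's definitions.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define VFRSTAT_COMPLETED	0x1u	/* assumed "reset completed" status bit */
	#define RESET_POLL_LIMIT	100	/* assumed number of polling attempts */

	static volatile uint32_t vfrstat_reg;	/* stand-in for the per-VF reset status register */

	static void
	delay_ms(int ms)
	{
		(void)ms;	/* placeholder; the driver would sleep or busy-wait here */
	}

	/* Poll the reset-status register until the VF reset completes or we give up. */
	static bool
	wait_for_vf_reset(int vf_num)
	{
		for (int i = 0; i < RESET_POLL_LIMIT; i++) {
			if (vfrstat_reg & VFRSTAT_COMPLETED)
				return (true);
			delay_ms(10);
		}
		fprintf(stderr, "VF %d failed to reset\n", vf_num);
		return (false);
	}

	int
	main(void)
	{
		vfrstat_reg = VFRSTAT_COMPLETED;	/* pretend the hardware finished the reset */
		return (wait_for_vf_reset(0) ? 0 : 1);
	}
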
451 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
458 global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
462 ixl_vc_opcode_str(op), op, status, vf->vf_num);
468 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
471 ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
475 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
482 status, vf->vf_num, file, line);
483 ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
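
Note that the handlers further down NACK requests through i40e_send_vf_nack(pf, vf, op, status), while the only NACK routine declared above, ixl_send_vf_nack_msg(), also takes a file name and line number (see the file/line arguments at line 482). Presumably a small convenience macro supplies the call site; its definition is not part of this listing, but it could plausibly look like:

	/* Hypothetical reconstruction, for orientation only. */
	#define i40e_send_vf_nack(pf, vf, op, st) \
		ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__)
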
487 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
497 vf->version.major = 1;
498 vf->version.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
500 vf->version.major = VIRTCHNL_VERSION_MAJOR;
501 vf->version.minor = VIRTCHNL_VERSION_MINOR;
507 __func__, vf->vf_num,
512 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS,
513 &vf->version, sizeof(vf->version));
517 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
520 ixl_reset_vf(pf, vf);
526 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
533 if (vf->version.minor == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
545 reply.num_queue_pairs = vf->vsi.num_tx_queues;
549 reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
551 reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues;
552 memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
554 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
559 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
569 global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
570 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
574 vf->vf_num, global_queue_num, info->queue_id, global_vf_num);
585 txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
598 ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
604 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
613 global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
617 vf->vf_num, global_queue_num, info->queue_id);
665 ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
671 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
679 if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) {
681 vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues);
682 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
687 if (info->vsi_id != vf->vsi.vsi_num) {
689 vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
690 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
698 if (pair->txq.vsi_id != vf->vsi.vsi_num ||
699 pair->rxq.vsi_id != vf->vsi.vsi_num ||
701 pair->txq.queue_id >= vf->vsi.num_tx_queues) {
703 i40e_send_vf_nack(pf, vf,
708 if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
709 i40e_send_vf_nack(pf, vf,
714 if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
715 i40e_send_vf_nack(pf, vf,
721 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
754 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
780 cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
789 cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
797 lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
800 vf->vf_num);
809 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
824 vector->vsi_id != vf->vsi.vsi_num) {
825 i40e_send_vf_nack(pf, vf,
832 if (largest_rxq >= vf->vsi.num_rx_queues) {
833 i40e_send_vf_nack(pf, vf,
842 if (largest_txq >= vf->vsi.num_tx_queues) {
843 i40e_send_vf_nack(pf, vf,
852 i40e_send_vf_nack(pf, vf,
858 ixl_vf_config_vector(pf, vf, vector);
861 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
865 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
873 if (select->vsi_id != vf->vsi.vsi_num ||
875 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
884 if (i >= vf->vsi.num_tx_queues) {
886 vf->vf_num, i);
890 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
893 if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
895 vf->vf_num, i);
897 error = ixl_enable_tx_ring(pf, &vf->qtag, i);
901 ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
909 if (i >= vf->vsi.num_rx_queues) {
911 vf->vf_num, i);
915 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
918 if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
920 vf->vf_num, i);
921 error = ixl_enable_rx_ring(pf, &vf->qtag, i);
925 ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
930 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
935 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
939 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
947 if (select->vsi_id != vf->vsi.vsi_num ||
949 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
958 if (i >= vf->vsi.num_tx_queues) {
960 vf->vf_num, i);
964 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
967 if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
969 vf->vf_num, i);
972 error = ixl_disable_tx_ring(pf, &vf->qtag, i);
976 ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
984 if (i >= vf->vsi.num_rx_queues) {
986 vf->vf_num, i);
990 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
993 if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
995 vf->vf_num, i);
998 error = ixl_disable_rx_ring(pf, &vf->qtag, i);
1002 ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
1007 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
1012 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
1016 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
1027 if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
1028 !(ETHER_IS_MULTICAST(addr) || !ixl_ether_is_equal(addr, vf->mac)))
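
The ixl_vf_mac_valid() fragments above appear to enforce the usual anti-spoofing policy: a VF without VF_FLAG_SET_MAC_CAP may only add filters for multicast addresses or for the MAC the PF assigned to it. A self-contained sketch of that policy in plain C follows; the struct, flag value, and helpers are stand-ins for the driver's types, and this sketch covers only the capability check visible in the listing.

	#include <errno.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	#define ETHER_ADDR_LEN		6
	#define VF_FLAG_SET_MAC_CAP	0x01	/* assumed bit value for this sketch */

	struct vf_state {
		uint32_t flags;
		uint8_t  mac[ETHER_ADDR_LEN];	/* unicast MAC assigned by the PF */
	};

	static bool
	ether_is_multicast(const uint8_t *a)
	{
		return ((a[0] & 0x01) != 0);	/* I/G bit set => group (multicast) address */
	}

	/*
	 * Capability check only: return 0 if the VF may add a filter for 'addr',
	 * i.e. it is allowed to set its own MAC, or the address is multicast,
	 * or the address is the VF's PF-assigned MAC; EPERM otherwise.
	 */
	static int
	vf_mac_valid(const struct vf_state *vf, const uint8_t *addr)
	{
		if (vf->flags & VF_FLAG_SET_MAC_CAP)
			return (0);
		if (ether_is_multicast(addr))
			return (0);
		if (memcmp(addr, vf->mac, ETHER_ADDR_LEN) == 0)
			return (0);
		return (EPERM);
	}

	int
	main(void)
	{
		struct vf_state vf = { .flags = 0, .mac = { 0x02, 0, 0, 0, 0, 1 } };
		uint8_t other[ETHER_ADDR_LEN] = { 0x02, 0, 0, 0, 0, 2 };

		return (vf_mac_valid(&vf, other) == EPERM ? 0 : 1);
	}
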
1035 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1043 vsi = &vf->vsi;
1047 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1053 if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
1054 i40e_send_vf_nack(pf, vf,
1065 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
1069 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1077 vsi = &vf->vsi;
1081 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
1089 i40e_send_vf_nack(pf, vf,
1097 ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
1100 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
1104 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
1108 vsi_ctx.seid = vf->vsi.seid;
1118 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1127 if (filter_list->vsi_id != vf->vsi.vsi_num) {
1128 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1133 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1134 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1141 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1147 code = ixl_vf_enable_vlan_strip(pf, vf);
1149 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1154 ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1156 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
1160 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1168 if (filter_list->vsi_id != vf->vsi.vsi_num) {
1169 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1176 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1182 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1183 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1189 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1191 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
1195 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1202 if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
1206 ixl_send_vf_ack(pf, vf,
1212 if (info->vsi_id != vf->vsi.vsi_num) {
1213 i40e_send_vf_nack(pf, vf,
1218 code = i40e_aq_set_vsi_unicast_promiscuous(hw, vf->vsi.seid,
1222 " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
1224 i40e_send_vf_nack(pf, vf,
1229 code = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi.seid,
1233 " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
1235 i40e_send_vf_nack(pf, vf,
1240 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
1244 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1250 if (queue->vsi_id != vf->vsi.vsi_num) {
1251 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
1256 ixl_update_eth_stats(&vf->vsi);
1258 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
1259 I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
1263 ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1277 vf->vf_num, key->key_len, 52);
1278 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1283 if (key->vsi_id != vf->vsi.vsi_num) {
1285 vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
1286 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1300 status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
1304 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1310 i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
1314 vf->vf_num, key->key[0]);
1316 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
1320 ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1333 vf->vf_num, lut->lut_entries, 64);
1334 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1339 if (lut->vsi_id != vf->vsi.vsi_num) {
1341 vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
1342 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1349 status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
1353 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1359 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
1363 vf->vf_num, lut->lut[0], lut->lut_entries);
1365 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
1369 ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1379 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
1380 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
1383 vf->vf_num, hena->hena);
1385 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
1389 ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
1401 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
1418 struct ixl_vf *vf;
1432 vf = &pf->vfs[vf_num];
1439 (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
1443 err = virtchnl_vc_validate_vf_msg(&vf->version, opcode, msg, msg_size);
1446 __func__, vf->vf_num, opcode, msg_size, err);
1447 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_PARAM);
1452 if (!(vf->vf_flags & VF_FLAG_ENABLED))
1457 ixl_vf_version_msg(pf, vf, msg, msg_size);
1460 ixl_vf_reset_msg(pf, vf, msg, msg_size);
1463 ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
1467 ixl_notify_vf_link_state(pf, vf);
1470 ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
1473 ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
1476 ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
1480 ixl_notify_vf_link_state(pf, vf);
1483 ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
1486 ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
1489 ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
1492 ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
1495 ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
1498 ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
1501 ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
1504 ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
1507 ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
1510 ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
1517 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
1526 struct ixl_vf *vf;
1546 vf = &pf->vfs[i];
1547 if (!(vf->vf_flags & VF_FLAG_ENABLED))
1558 ixl_reinit_vf(pf, vf);
1747 ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
1755 num_queues, vf->vf_num);
1757 device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
1760 device_printf(dev, "Setting VF %d num-queues to %d\n", vf->vf_num, IAVF_MAX_QUEUES);
1763 error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
1766 num_queues, vf->vf_num);
1771 vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
1782 struct ixl_vf *vf;
1788 vf = &pf->vfs[vfnum];
1789 vf->vf_num = vfnum;
1790 vf->vsi.back = pf;
1791 vf->vf_flags = VF_FLAG_ENABLED;
1795 error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
1799 error = ixl_vf_setup_vsi(pf, vf);
1805 bcopy(mac, vf->mac, ETHER_ADDR_LEN);
1808 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1814 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1817 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
1820 vf->vf_flags |= VF_FLAG_PROMISC_CAP;
1822 vf->vf_flags |= VF_FLAG_VLAN_CAP;
1825 ixl_reset_vf(pf, vf);
1828 snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
1829 ixl_vsi_add_sysctls(&vf->vsi, sysctl_name, false);