Lines matching references to vsi (Intel ice driver aRFS code)

9  * @vsi: VSI to check
11 static bool ice_is_arfs_active(struct ice_vsi *vsi)
13 return !!vsi->arfs_fltr_list;
29 struct ice_vsi *vsi;
31 vsi = ice_get_main_vsi(pf);
32 if (!vsi)
35 arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;
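
The fragments at 29-35 are from ice_is_arfs_using_perfect_flow(), which lets the Flow Director code ask whether aRFS currently holds perfect (4-tuple) filters of a given type on the main VSI. A minimal sketch of the rest of that check; the active_*_cnt counter fields and the ICE_FLTR_PTYPE_* cases are assumptions, not taken from the listing:

        arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;

        /* the active counters are updated from multiple CPUs */
        smp_mb__before_atomic();
        switch (flow_type) {
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0;
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0;
        default:
                return false;
        }
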
55 * @vsi: VSI that aRFS is active on
60 ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,
63 struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;
91 dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
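
The fragments at 55-91 belong to ice_arfs_update_active_fltr_cntrs(). A sketch of what plausibly sits between the counter lookup (63) and the dev_err() fallback (91); the entry/add parameter names and the active_*_cnt fields are assumptions:

        switch (entry->fltr_info.flow_type) {
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                if (add)
                        atomic_inc(&fltr_cntrs->active_tcpv4_cnt);
                else
                        atomic_dec(&fltr_cntrs->active_tcpv4_cnt);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                if (add)
                        atomic_inc(&fltr_cntrs->active_udpv4_cnt);
                else
                        atomic_dec(&fltr_cntrs->active_udpv4_cnt);
                break;
        /* IPv6 TCP/UDP are handled the same way */
        default:
                dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
                        entry->fltr_info.flow_type);
        }
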
98 * @vsi: VSI for the flow rules that need to be deleted
106 ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)
112 dev = ice_pf_to_dev(vsi->back);
117 result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
120 ice_arfs_update_active_fltr_cntrs(vsi, e, false);
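
The fragments at 98-120 are from ice_arfs_del_flow_rules(): each entry on del_list_head is removed from hardware through Flow Director, the active counters are decremented on success, and the entry itself is freed because the aRFS hash table no longer references it. A compact sketch of that loop, assuming the trailing booleans to ice_fdir_write_fltr() are the add and tunnel flags:

        hlist_for_each_entry_safe(e, n, del_list_head, list_entry) {
                int result;

                result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
                                             false);
                if (!result)
                        ice_arfs_update_active_fltr_cntrs(vsi, e, false);
                else
                        dev_dbg(dev, "Unable to delete aRFS entry, err %d\n",
                                result);

                /* the hash table no longer references this entry */
                hlist_del(&e->list_entry);
                devm_kfree(dev, e);
        }
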
134 * @vsi: VSI for the flow rules that need to be added
143 ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head)
149 dev = ice_pf_to_dev(vsi->back);
154 result = ice_fdir_write_fltr(vsi->back,
158 ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
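
The fragments at 134-158 mirror the delete path: ice_arfs_add_flow_rules() walks a list of small wrapper nodes that point at aRFS entries, programs each referenced entry into hardware, bumps the counters on success, and frees only the wrapper, since the entry stays in the hash table. A sketch; the wrapper type name (ice_arfs_entry_ptr) is an assumption:

        hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) {
                int result;

                result = ice_fdir_write_fltr(vsi->back,
                                             &ep->arfs_entry->fltr_info, true,
                                             false);
                if (!result)
                        ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
                                                          true);

                /* free the wrapper only; the entry remains in the hash table */
                hlist_del(&ep->list_entry);
                devm_kfree(dev, ep);
        }
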
174 * @vsi: VSI containing the aRFS entry
182 ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry)
185 if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
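
The fragment at 185 shows ice_arfs_is_flow_expired() delegating the first part of the decision to the stack's rps_may_expire_flow(). A sketch of just that RFS-side check, built from the entry's queue index, flow ID, and filter ID; any further driver-side aging (e.g. for UDP flows) is assumed to follow this:

        if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
                                arfs_entry->flow_id,
                                arfs_entry->fltr_info.fltr_id))
                return true;
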
202 * @vsi: the VSI to be forwarded to
215 ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx,
223 dev = ice_pf_to_dev(vsi->back);
226 hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
246 if (ice_arfs_is_flow_expired(vsi, e)) {
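
The fragments at 202-246 are from ice_arfs_update_flow_rules(), which walks one hash bucket and sorts its entries onto the add and delete lists supplied by the caller. A sketch of the per-entry decision; the add_list/del_list parameter names, the ICE_ARFS_* state values, and the wrapper type are assumptions:

        hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
                if (e->fltr_state == ICE_ARFS_INACTIVE) {
                        /* not in HW yet: queue it for addition */
                        struct ice_arfs_entry_ptr *ep =
                                devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC);

                        if (!ep)
                                continue;
                        ep->arfs_entry = e;
                        hlist_add_head(&ep->list_entry, add_list);
                        e->fltr_state = ICE_ARFS_ACTIVE;
                } else if (e->fltr_state == ICE_ARFS_ACTIVE &&
                           ice_arfs_is_flow_expired(vsi, e)) {
                        /* expired: pull it out of the bucket for deletion */
                        hlist_del(&e->list_entry);
                        e->fltr_state = ICE_ARFS_TODEL;
                        hlist_add_head(&e->list_entry, del_list);
                }
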
293 * @vsi: destination VSI for this flow
301 ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk,
308 arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
317 fltr_info->dest_vsi = vsi->idx;
344 atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;
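
The fragments at 293-344 are from ice_arfs_build_entry(): the entry is allocated in atomic context, pointed at the destination VSI, filled from the dissected flow keys, and given a filter ID derived from the per-VSI atomic counter modulo RPS_NO_FILTER. A sketch for the IPv4/TCP case only; the ice_fdir_fltr address/port field names are assumptions:

        arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
                                  sizeof(*arfs_entry), GFP_ATOMIC);
        if (!arfs_entry)
                return NULL;

        fltr_info = &arfs_entry->fltr_info;
        fltr_info->q_index = rxq_idx;
        fltr_info->dest_vsi = vsi->idx;

        if (fk->basic.n_proto == htons(ETH_P_IP) &&
            fk->basic.ip_proto == IPPROTO_TCP) {
                fltr_info->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src;
                fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst;
                fltr_info->ip.v4.src_port = fk->ports.src;
                fltr_info->ip.v4.dst_port = fk->ports.dst;
        }

        arfs_entry->flow_id = flow_id;
        fltr_info->fltr_id =
                atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;
        return arfs_entry;
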
402 struct ice_vsi *vsi = np->vsi;
411 if (unlikely(!vsi->arfs_fltr_list))
414 pf = vsi->back;
441 spin_lock_bh(&vsi->arfs_lock);
442 hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
460 ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
464 arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
472 hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
476 spin_unlock_bh(&vsi->arfs_lock);
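
The fragments at 402-476 are from ice_rx_flow_steer(), evidently the driver's ndo_rx_flow_steer hook: under the per-VSI arfs_lock it either updates an existing entry whose target queue changed or builds and inserts a new one into the bucket selected by the skb hash. A condensed sketch of that core; the ICE_ARFS_LST_MASK bucket mask and the use of ice_service_task_schedule() as the sync trigger are assumptions:

        idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK;

        spin_lock_bh(&vsi->arfs_lock);
        hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
                             list_entry) {
                if (arfs_entry->flow_id != flow_id)
                        continue;

                ret = arfs_entry->fltr_info.fltr_id;
                if (arfs_entry->fltr_info.q_index == rxq_idx)
                        goto out;

                /* same flow, new target queue: reprogram on the next sync */
                arfs_entry->fltr_info.q_index = rxq_idx;
                arfs_entry->fltr_state = ICE_ARFS_INACTIVE;
                ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
                goto out_schedule;
        }

        arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
        if (!arfs_entry) {
                ret = -ENOMEM;
                goto out;
        }
        ret = arfs_entry->fltr_info.fltr_id;
        hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
out_schedule:
        ice_service_task_schedule(pf);
out:
        spin_unlock_bh(&vsi->arfs_lock);
        return ret;
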
482 * @vsi: VSI that aRFS counters need to be initialized on
484 static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
486 if (!vsi || vsi->type != ICE_VSI_PF)
489 vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
491 if (!vsi->arfs_fltr_cntrs)
494 vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
496 if (!vsi->arfs_last_fltr_id) {
497 kfree(vsi->arfs_fltr_cntrs);
498 vsi->arfs_fltr_cntrs = NULL;
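
The fragments at 482-498 show ice_init_arfs_cntrs() allocating the two small per-VSI aRFS objects and unwinding the first allocation when the second one fails. A sketch of the whole helper; the exact errno values are assumptions:

static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
{
        if (!vsi || vsi->type != ICE_VSI_PF)
                return -EINVAL;

        vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
                                       GFP_KERNEL);
        if (!vsi->arfs_fltr_cntrs)
                return -ENOMEM;

        vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
                                         GFP_KERNEL);
        if (!vsi->arfs_last_fltr_id) {
                /* unwind so the VSI never holds half-initialized aRFS state */
                kfree(vsi->arfs_fltr_cntrs);
                vsi->arfs_fltr_cntrs = NULL;
                return -ENOMEM;
        }

        return 0;
}
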
507 * @vsi: the VSI to be forwarded to
509 void ice_init_arfs(struct ice_vsi *vsi)
514 if (!vsi || vsi->type != ICE_VSI_PF)
522 if (ice_init_arfs_cntrs(vsi))
528 spin_lock_init(&vsi->arfs_lock);
530 vsi->arfs_fltr_list = arfs_fltr_list;
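
The fragments at 507-530 are from ice_init_arfs(): the bucket array is allocated and initialized first, the counters next, and vsi->arfs_fltr_list is assigned only after everything succeeds, which is what makes the NULL check in ice_is_arfs_active() sufficient. A sketch; ICE_MAX_ARFS_LIST as the bucket count is an assumption:

        arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
                                 GFP_KERNEL);
        if (!arfs_fltr_list)
                return;

        if (ice_init_arfs_cntrs(vsi))
                goto free_arfs_fltr_list;

        for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
                INIT_HLIST_HEAD(&arfs_fltr_list[i]);

        spin_lock_init(&vsi->arfs_lock);

        /* publish the list last; ice_is_arfs_active() keys off this pointer */
        vsi->arfs_fltr_list = arfs_fltr_list;
        return;

free_arfs_fltr_list:
        kfree(arfs_fltr_list);
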
540 * @vsi: the VSI to be forwarded to
542 void ice_clear_arfs(struct ice_vsi *vsi)
547 if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back ||
548 !vsi->arfs_fltr_list)
551 dev = ice_pf_to_dev(vsi->back);
556 spin_lock_bh(&vsi->arfs_lock);
557 hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
562 spin_unlock_bh(&vsi->arfs_lock);
565 kfree(vsi->arfs_fltr_list);
566 vsi->arfs_fltr_list = NULL;
567 kfree(vsi->arfs_last_fltr_id);
568 vsi->arfs_last_fltr_id = NULL;
569 kfree(vsi->arfs_fltr_cntrs);
570 vsi->arfs_fltr_cntrs = NULL;
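
The fragments at 540-570 are from ice_clear_arfs(): every bucket is drained under arfs_lock, then the bucket array, the last-filter-ID counter, and the filter counters are freed and NULLed so the VSI reads as aRFS-inactive again. A sketch of the per-bucket drain that precedes the kfree() calls shown above:

        for (i = 0; i < ICE_MAX_ARFS_LIST; i++) {
                struct ice_arfs_entry *r;
                struct hlist_node *n;

                spin_lock_bh(&vsi->arfs_lock);
                hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
                                          list_entry) {
                        hlist_del(&r->list_entry);
                        devm_kfree(dev, r);
                }
                spin_unlock_bh(&vsi->arfs_lock);
        }
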
575 * @vsi: the VSI to be forwarded to
577 void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
581 if (!vsi || vsi->type != ICE_VSI_PF)
584 netdev = vsi->netdev;
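
The fragments at 575-584 belong to ice_free_cpu_rx_rmap(); the remainder of the function plausibly just releases the netdev's rx_cpu_rmap through the standard cpu_rmap API. A sketch:

        if (!netdev || !netdev->rx_cpu_rmap)
                return;

        free_irq_cpu_rmap(netdev->rx_cpu_rmap);
        netdev->rx_cpu_rmap = NULL;
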
594 * @vsi: the VSI to be forwarded to
596 int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
602 if (!vsi || vsi->type != ICE_VSI_PF)
605 pf = vsi->back;
606 netdev = vsi->netdev;
607 if (!pf || !netdev || !vsi->num_q_vectors)
610 netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
611 vsi->type, netdev->name, vsi->num_q_vectors);
613 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
617 ice_for_each_q_vector(vsi, i)
619 vsi->q_vectors[i]->irq.virq)) {
620 ice_free_cpu_rx_rmap(vsi);
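
The fragments at 594-620 are from ice_set_cpu_rx_rmap(): an IRQ CPU reverse map sized to the VSI's q_vectors is allocated, each vector's Linux IRQ is added to it, and the map is torn down via ice_free_cpu_rx_rmap() if any add fails. A sketch of the tail of the function; the errno values are assumptions:

        netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
        if (!netdev->rx_cpu_rmap)
                return -EINVAL;

        ice_for_each_q_vector(vsi, i)
                if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
                                     vsi->q_vectors[i]->irq.virq)) {
                        /* roll back the partially built map */
                        ice_free_cpu_rx_rmap(vsi);
                        return -EINVAL;
                }

        return 0;
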