Lines Matching refs:rule

5178 			       struct hclge_fd_rule *rule)
5180 hlist_del(&rule->rule_node);
5181 kfree(rule);
5193 /* 1) if the new state is TO_ADD, just replace the old rule
5195 * new rule will be configured to the hardware.
5196 * 2) if the new state is ACTIVE, it means the new rule
5198 * the old rule node with the same location.
5200 * unnecessary to update the rule number and fd_bmap.
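For cases 1) and 2) the driver can reuse the existing list node in place. A minimal sketch of that replacement, assuming a hypothetical helper that already holds the matching old node (the full function body is not part of this listing):

static void hclge_fd_replace_rule_node(struct hclge_fd_rule *old_rule,
                                       struct hclge_fd_rule *new_rule)
{
        /* splice the new rule into the old node's list position, then
         * copy its content over the old rule; the list length, rule
         * number and fd_bmap all stay unchanged
         */
        new_rule->rule_node.next = old_rule->rule_node.next;
        new_rule->rule_node.pprev = old_rule->rule_node.pprev;
        memcpy(old_rule, new_rule, sizeof(*old_rule));
        kfree(new_rule);
}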
5212 /* if the new request is TO_DEL, and an old rule exists
5213 * 1) the state of the old rule is TO_DEL: there is nothing to do,
5214 * because we delete the rule by location; other rule content
5216 * 2) the state of the old rule is ACTIVE: we need to change its
5217 * state to TO_DEL, so the rule will be deleted when the periodic
5219 * 3) the state of the old rule is TO_ADD: it means the rule hasn't
5220 * been added to hardware, so we just delete the rule node from
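The three TO_DEL cases collapse into a short helper. A sketch, assuming a hypothetical hclge_fd_del_rule_node() wrapper around the fragments above:

static void hclge_fd_del_rule_node(struct hclge_dev *hdev,
                                   struct hclge_fd_rule *old_rule)
{
        if (old_rule->state == HCLGE_FD_TO_ADD) {
                /* case 3: never reached hardware, drop the node now */
                hclge_fd_dec_rule_cnt(hdev, old_rule->location);
                hclge_fd_free_node(hdev, old_rule);
                return;
        }

        /* case 2: ACTIVE becomes TO_DEL for the periodic task to
         * clean up; case 1: already TO_DEL, so re-setting it is a no-op
         */
        old_rule->state = HCLGE_FD_TO_DEL;
}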
5237 struct hclge_fd_rule *rule;
5240 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5241 if (rule->location == location)
5242 return rule;
5243 else if (rule->location > location)
5248 *parent = rule;
5254 /* insert fd rule node in ascending order according to rule->location */
5256 struct hclge_fd_rule *rule,
5259 INIT_HLIST_NODE(&rule->rule_node);
5262 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5264 hlist_add_head(&rule->rule_node, hlist);
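The search fragments above elide the early exit. Since the list is kept sorted by location, the loop can stop at the first node past the target; a sketch of the presumed full helper:

static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
                                                u16 location,
                                                struct hclge_fd_rule **parent)
{
        struct hclge_fd_rule *rule;
        struct hlist_node *node;

        hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
                if (rule->location == location)
                        return rule;
                else if (rule->location > location)
                        break;          /* sorted list: no match further on */
                *parent = rule;         /* last node before the target slot */
        }

        return NULL;
}

The insert half then uses *parent directly: hlist_add_behind() when a predecessor exists, hlist_add_head() otherwise, exactly as the fragments at 5262/5264 show.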
5322 struct hclge_fd_rule *rule)
5329 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5330 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5334 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5335 info = &rule->ep.user_def;
5343 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5352 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5358 struct hclge_fd_rule *rule)
5362 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5363 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5366 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5368 cfg->offset = rule->ep.user_def.offset;
5375 struct hclge_fd_rule *rule)
5379 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5380 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5383 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
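These three user-def helpers implement a per-layer refcount: the first rule on a layer claims the single programmable offset, later rules must reuse it (hence the error above), and the last one releases it. A sketch of the inc/dec pair, with the early-return guards from the fragments elided (the cfg->ref_cnt field name is an assumption):

static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
                                         struct hclge_fd_rule *rule)
{
        struct hclge_fd_user_def_cfg *cfg;

        cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
        if (!cfg->ref_cnt)
                cfg->offset = rule->ep.user_def.offset; /* first user claims it */
        cfg->ref_cnt++;
}

static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
                                         struct hclge_fd_rule *rule)
{
        struct hclge_fd_user_def_cfg *cfg;

        cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
        if (cfg->ref_cnt)
                cfg->ref_cnt--;
        if (!cfg->ref_cnt)
                cfg->offset = 0;        /* the layer's offset is free again */
}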
5412 /* it's unlikely to fail here, because we have checked the rule
5417 "failed to delete fd rule %u, it's inexistent\n",
5571 * dst_vport is used to specify the rule
5675 struct hclge_fd_rule *rule)
5681 u8 *p = (u8 *)rule;
5684 if (rule->unused_tuple & BIT(tuple_bit))
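The unused_tuple bitmap lets the key builder skip tuples the rule does not match on; each remaining tuple is folded into the TCAM's paired x/y keys. The driver's actual conversion macros are not in this listing, but a common x/y convention looks like this (an assumption about the scheme, not the verified hardware encoding):

static void fd_calc_xy(u8 pattern, u8 mask, u8 *key_x, u8 *key_y)
{
        *key_x = ~pattern & mask;       /* set where a masked bit must be 0 */
        *key_y = pattern & mask;        /* set where a masked bit must be 1 */
        /* a cleared mask bit leaves x = y = 0, i.e. "don't care" */
}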
5757 struct hclge_fd_rule *rule)
5774 rule->vf_id, 0);
5798 struct hclge_fd_rule *rule)
5821 cur_key_y, rule);
5834 rule);
5836 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5841 rule->queue_id, ret);
5845 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5850 rule->queue_id, ret);
5855 struct hclge_fd_rule *rule)
5862 ad_data.ad_id = rule->location;
5864 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5866 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5869 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5871 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5874 ad_data.queue_id = rule->queue_id;
5879 ad_data.counter_id = rule->vf_id %
5890 ad_data.rule_id = rule->location;
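The action data resolves the rule's disposition: drop, steer into a TC's queue range, or steer to one explicit queue. A condensed sketch of that selection (the drop flag name is an assumption; only ad_id, queue_id and counter_id appear in the fragments):

        ad_data.ad_id = rule->location; /* action entry mirrors the key slot */

        if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
                ad_data.drop_packet = true;
        } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
                /* base queue of the TC plus log2 of its queue count, so
                 * hardware can spread flows across the whole TC
                 */
                ad_data.queue_id =
                        kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
                ad_data.tc_size =
                        ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
        } else {
                ad_data.queue_id = rule->queue_id;
        }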
6166 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
6227 struct hclge_fd_rule *rule, u8 ip_proto)
6229 rule->tuples.src_ip[IPV4_INDEX] =
6231 rule->tuples_mask.src_ip[IPV4_INDEX] =
6234 rule->tuples.dst_ip[IPV4_INDEX] =
6236 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6239 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6240 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6242 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6243 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6245 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6246 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6248 rule->tuples.ether_proto = ETH_P_IP;
6249 rule->tuples_mask.ether_proto = 0xFFFF;
6251 rule->tuples.ip_proto = ip_proto;
6252 rule->tuples_mask.ip_proto = 0xFF;
6256 struct hclge_fd_rule *rule)
6258 rule->tuples.src_ip[IPV4_INDEX] =
6260 rule->tuples_mask.src_ip[IPV4_INDEX] =
6263 rule->tuples.dst_ip[IPV4_INDEX] =
6265 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6268 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6269 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6271 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6272 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6274 rule->tuples.ether_proto = ETH_P_IP;
6275 rule->tuples_mask.ether_proto = 0xFFFF;
6279 struct hclge_fd_rule *rule, u8 ip_proto)
6281 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6283 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6286 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6288 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6291 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6292 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6294 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6295 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6297 rule->tuples.ether_proto = ETH_P_IPV6;
6298 rule->tuples_mask.ether_proto = 0xFFFF;
6300 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6301 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6303 rule->tuples.ip_proto = ip_proto;
6304 rule->tuples_mask.ip_proto = 0xFF;
6308 struct hclge_fd_rule *rule)
6310 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6312 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6315 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6317 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6320 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6321 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6323 rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6324 rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6326 rule->tuples.ether_proto = ETH_P_IPV6;
6327 rule->tuples_mask.ether_proto = 0xFFFF;
6331 struct hclge_fd_rule *rule)
6333 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6334 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6336 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6337 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6339 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6340 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6344 struct hclge_fd_rule *rule)
6348 rule->tuples.l2_user_def = info->data;
6349 rule->tuples_mask.l2_user_def = info->data_mask;
6352 rule->tuples.l3_user_def = info->data;
6353 rule->tuples_mask.l3_user_def = info->data_mask;
6356 rule->tuples.l4_user_def = (u32)info->data << 16;
6357 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6363 rule->ep.user_def = *info;
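Each user-def layer lands in its own tuple word. A sketch of the full dispatch, with the case labels (not visible above) assumed:

        switch (info->layer) {
        case HCLGE_FD_USER_DEF_L2:
                rule->tuples.l2_user_def = info->data;
                rule->tuples_mask.l2_user_def = info->data_mask;
                break;
        case HCLGE_FD_USER_DEF_L3:
                rule->tuples.l3_user_def = info->data;
                rule->tuples_mask.l3_user_def = info->data_mask;
                break;
        case HCLGE_FD_USER_DEF_L4:
                /* the L4 tuple word is 32 bits wide; the 16-bit user
                 * data sits in its upper half, hence the shift
                 */
                rule->tuples.l4_user_def = (u32)info->data << 16;
                rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
                break;
        default:
                break;
        }

        rule->ep.user_def = *info;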
6367 struct hclge_fd_rule *rule,
6374 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP);
6377 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP);
6380 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP);
6383 hclge_fd_get_ip4_tuple(fs, rule);
6386 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP);
6389 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_TCP);
6392 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_UDP);
6395 hclge_fd_get_ip6_tuple(fs, rule);
6398 hclge_fd_get_ether_tuple(fs, rule);
6405 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6406 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6407 hclge_fd_get_user_def_tuple(info, rule);
6411 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6412 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
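All of these extractors hang off one dispatch on the ethtool flow type, with the FLOW_EXT / FLOW_MAC_EXT bits masked off first so the VLAN, user-def and destination-MAC extensions can be layered on any base type. A sketch of the presumed dispatch (abridged):

        u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

        switch (flow_type) {
        case SCTP_V4_FLOW:
                hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP);
                break;
        case TCP_V4_FLOW:
                hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP);
                break;
        case UDP_V4_FLOW:
                hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP);
                break;
        case IP_USER_FLOW:
                hclge_fd_get_ip4_tuple(fs, rule);
                break;
        /* ...the v6 types and ETHER_FLOW follow the same pattern... */
        default:
                return -EOPNOTSUPP;
        }

A rule of this shape would typically originate from an ethtool ntuple request such as ethtool -N eth0 flow-type tcp4 dst-port 80 action 3 loc 5 (an illustrative command, not taken from the source).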
6419 struct hclge_fd_rule *rule)
6423 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6427 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6431 struct hclge_fd_rule *rule)
6437 if (hdev->fd_active_type != rule->rule_type &&
6442 rule->rule_type, hdev->fd_active_type);
6447 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6455 ret = hclge_fd_config_rule(hdev, rule);
6459 rule->state = HCLGE_FD_ACTIVE;
6460 hdev->fd_active_type = rule->rule_type;
6461 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6523 struct hclge_fd_rule *rule;
6551 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6552 if (!rule)
6555 ret = hclge_fd_get_tuple(fs, rule, &info);
6557 kfree(rule);
6561 rule->flow_type = fs->flow_type;
6562 rule->location = fs->location;
6563 rule->unused_tuple = unused;
6564 rule->vf_id = dst_vport_id;
6565 rule->queue_id = q_index;
6566 rule->action = action;
6567 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6569 ret = hclge_add_fd_entry_common(hdev, rule);
6571 kfree(rule);
6596 "Delete fail, rule %u is inexistent\n", fs->location);
6616 struct hclge_fd_rule *rule;
6628 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6630 hlist_del(&rule->rule_node);
6631 kfree(rule);
6655 struct hclge_fd_rule *rule;
6670 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6671 if (rule->state == HCLGE_FD_ACTIVE)
6672 rule->state = HCLGE_FD_TO_ADD;
6695 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6699 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6700 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6701 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6703 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6704 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6705 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6707 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6708 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6709 0 : cpu_to_be16(rule->tuples_mask.src_port);
6711 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6712 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6713 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6715 spec->tos = rule->tuples.ip_tos;
6716 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6717 0 : rule->tuples_mask.ip_tos;
6720 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6724 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6725 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6726 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6728 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6729 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6730 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6732 spec->tos = rule->tuples.ip_tos;
6733 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6734 0 : rule->tuples_mask.ip_tos;
6736 spec->proto = rule->tuples.ip_proto;
6737 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6738 0 : rule->tuples_mask.ip_proto;
6743 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6748 rule->tuples.src_ip, IPV6_SIZE);
6750 rule->tuples.dst_ip, IPV6_SIZE);
6751 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6754 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6757 if (rule->unused_tuple & BIT(INNER_DST_IP))
6760 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6763 spec->tclass = rule->tuples.ip_tos;
6764 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6765 0 : rule->tuples_mask.ip_tos;
6767 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6768 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6769 0 : cpu_to_be16(rule->tuples_mask.src_port);
6771 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6772 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6773 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6776 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6780 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6781 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6782 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6786 rule->tuples_mask.src_ip, IPV6_SIZE);
6788 if (rule->unused_tuple & BIT(INNER_DST_IP))
6792 rule->tuples_mask.dst_ip, IPV6_SIZE);
6794 spec->tclass = rule->tuples.ip_tos;
6795 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6796 0 : rule->tuples_mask.ip_tos;
6798 spec->l4_proto = rule->tuples.ip_proto;
6799 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6800 0 : rule->tuples_mask.ip_proto;
6803 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6807 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6808 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6810 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6813 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6815 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6818 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6820 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6821 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6822 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6826 struct hclge_fd_rule *rule)
6828 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6835 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6836 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6839 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6844 struct hclge_fd_rule *rule)
6847 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6849 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6850 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6852 hclge_fd_get_user_def_info(fs, rule);
6856 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6857 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6861 rule->tuples_mask.dst_mac);
6868 struct hclge_fd_rule *rule = NULL;
6871 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6872 if (rule->location == location)
6873 return rule;
6874 else if (rule->location > location)
6882 struct hclge_fd_rule *rule)
6884 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6889 fs->ring_cookie = rule->queue_id;
6890 vf_id = rule->vf_id;
6900 struct hclge_fd_rule *rule = NULL;
6911 rule = hclge_get_fd_rule(hdev, fs->location);
6912 if (!rule) {
6917 fs->flow_type = rule->flow_type;
6922 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6926 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6932 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6936 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6939 /* The flow type of fd rule has been checked before adding into the rule
6944 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6949 hclge_fd_get_ext_info(fs, rule);
6951 hclge_fd_get_ring_cookie(fs, rule);
6963 struct hclge_fd_rule *rule;
6973 hlist_for_each_entry_safe(rule, node2,
6980 if (rule->state == HCLGE_FD_TO_DEL)
6983 rule_locs[cnt] = rule->location;
7017 /* traverse all rules, check whether an existing rule has the same tuples */
7022 struct hclge_fd_rule *rule = NULL;
7025 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7026 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7027 return rule;
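The whole-struct memcmp() above is only safe because every rule is allocated with kzalloc(), so padding bytes inside the tuples struct are zero on both sides. A sketch of how the aRFS path below presumably feeds this lookup (the dissect call lives in the hns3 layer and hclge_fd_get_flow_tuples() is not shown in this listing, so treat the exact call order as an assumption):

        struct hclge_fd_rule_tuples new_tuples = {};
        struct flow_keys fkeys;

        if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
                return -EOPNOTSUPP;

        hclge_fd_get_flow_tuples(&fkeys, &new_tuples);
        rule = hclge_fd_search_flow_keys(hdev, &new_tuples);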
7034 struct hclge_fd_rule *rule)
7036 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7039 rule->action = 0;
7040 rule->vf_id = 0;
7041 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7042 rule->state = HCLGE_FD_TO_ADD;
7045 rule->flow_type = TCP_V4_FLOW;
7047 rule->flow_type = UDP_V4_FLOW;
7050 rule->flow_type = TCP_V6_FLOW;
7052 rule->flow_type = UDP_V6_FLOW;
7054 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7055 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7064 struct hclge_fd_rule *rule;
7070 /* when there is already an fd rule added by the user,
7087 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7088 if (!rule) {
7095 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7096 if (!rule) {
7101 rule->location = bit_id;
7102 rule->arfs.flow_id = flow_id;
7103 rule->queue_id = queue_id;
7104 hclge_fd_build_arfs_rule(&new_tuples, rule);
7105 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7107 } else if (rule->queue_id != queue_id) {
7108 rule->queue_id = queue_id;
7109 rule->state = HCLGE_FD_TO_ADD;
7114 return rule->location;
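Taken together, the aRFS entry point has three outcomes: a brand-new flow allocates a spare location from fd_bmap and queues a TO_ADD rule, a flow that migrated to another queue is retargeted in place, and an unchanged flow is a no-op. The rule's location doubles as the filter id returned to the stack. Condensed (a sketch; locking, the bitmap search and error paths are elided):

        rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
        if (!rule) {
                /* new flow; GFP_ATOMIC because this runs from the RX path */
                rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
                if (!rule)
                        return -ENOMEM;

                rule->location = bit_id;        /* spare slot from fd_bmap */
                rule->arfs.flow_id = flow_id;
                rule->queue_id = queue_id;
                hclge_fd_build_arfs_rule(&new_tuples, rule);
                hclge_update_fd_list(hdev, rule->state, rule->location, rule);
        } else if (rule->queue_id != queue_id) {
                /* flow moved to another CPU: reprogram the same slot */
                rule->queue_id = queue_id;
                rule->state = HCLGE_FD_TO_ADD;
        }

        return rule->location;          /* filter id handed back to aRFS */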
7121 struct hclge_fd_rule *rule;
7129 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7130 if (rule->state != HCLGE_FD_ACTIVE)
7132 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7133 rule->arfs.flow_id, rule->location)) {
7134 rule->state = HCLGE_FD_TO_DEL;
7146 struct hclge_fd_rule *rule;
7153 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7154 switch (rule->state) {
7158 rule->location, NULL, false);
7163 hclge_fd_dec_rule_cnt(hdev, rule->location);
7164 hlist_del(&rule->rule_node);
7165 kfree(rule);
7178 struct hclge_fd_rule *rule)
7192 rule->tuples.ether_proto = ethtype_key;
7193 rule->tuples_mask.ether_proto = ethtype_mask;
7194 rule->tuples.ip_proto = match.key->ip_proto;
7195 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7197 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7198 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7203 struct hclge_fd_rule *rule)
7209 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7210 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7211 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7212 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7214 rule->unused_tuple |= BIT(INNER_DST_MAC);
7215 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7220 struct hclge_fd_rule *rule)
7226 rule->tuples.vlan_tag1 = match.key->vlan_id |
7228 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7231 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7236 struct hclge_fd_rule *rule,
7255 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7256 rule->tuples_mask.src_ip[IPV4_INDEX] =
7258 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7259 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7265 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7267 be32_to_cpu_array(rule->tuples_mask.src_ip,
7269 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7271 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7274 rule->unused_tuple |= BIT(INNER_SRC_IP);
7275 rule->unused_tuple |= BIT(INNER_DST_IP);
7282 struct hclge_fd_rule *rule)
7289 rule->tuples.src_port = be16_to_cpu(match.key->src);
7290 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7291 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7292 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7294 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7295 rule->unused_tuple |= BIT(INNER_DST_PORT);
7301 struct hclge_fd_rule *rule)
7321 hclge_get_cls_key_basic(flow, rule);
7322 hclge_get_cls_key_mac(flow, rule);
7323 hclge_get_cls_key_vlan(flow, rule);
7325 ret = hclge_get_cls_key_ip(flow, rule, extack);
7329 hclge_get_cls_key_port(flow, rule);
7365 struct hclge_fd_rule *rule;
7381 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7382 if (!rule)
7385 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7387 kfree(rule);
7391 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7392 rule->cls_flower.tc = tc;
7393 rule->location = cls_flower->common.prio - 1;
7394 rule->vf_id = 0;
7395 rule->cls_flower.cookie = cls_flower->cookie;
7396 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7398 ret = hclge_add_fd_entry_common(hdev, rule);
7400 kfree(rule);
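For the cls_flower path the TCAM slot comes straight from the filter priority (prio - 1, per line 7393), and the action is always SELECT_TC. Such a rule would typically be installed by something like tc filter add dev eth0 ingress prio 1 protocol ip flower dst_ip 192.168.0.2 skip_sw hw_tc 2 (an illustrative command, not taken from the source), which would land in location 0 with cls_flower.tc = 2.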
7408 struct hclge_fd_rule *rule;
7411 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7412 if (rule->cls_flower.cookie == cookie)
7413 return rule;
7424 struct hclge_fd_rule *rule;
7432 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7433 if (!rule) {
7438 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7441 /* if tcam config fails, set rule state to TO_DEL,
7442 * so the rule will be deleted when the periodic
7445 hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
7451 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7459 struct hclge_fd_rule *rule;
7468 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7469 switch (rule->state) {
7471 ret = hclge_fd_config_rule(hdev, rule);
7474 rule->state = HCLGE_FD_ACTIVE;
7478 rule->location, NULL, false);
7481 hclge_fd_dec_rule_cnt(hdev, rule->location);
7482 hclge_fd_free_node(hdev, rule);
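This periodic walk is the state machine that finally reconciles the software list with the TCAM: TO_ADD rules are programmed and promoted to ACTIVE, TO_DEL rules are invalidated in hardware and freed. A condensed sketch (error handling that retries on the next tick is elided):

        hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
                switch (rule->state) {
                case HCLGE_FD_TO_ADD:
                        if (hclge_fd_config_rule(hdev, rule))
                                return;         /* retry on the next tick */
                        rule->state = HCLGE_FD_ACTIVE;
                        break;
                case HCLGE_FD_TO_DEL:
                        if (hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
                                                 rule->location, NULL, false))
                                return;
                        hclge_fd_dec_rule_cnt(hdev, rule->location);
                        hclge_fd_free_node(hdev, rule);
                        break;
                default:
                        break;
                }
        }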