Searched refs:offloads (Results 1 - 25 of 34) sorted by relevance

/linux-master/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/
ingress_ofld.c:43  if (vport->ingress.offloads.modify_metadata_rule) {
45 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
82 vport->ingress.offloads.modify_metadata =
85 if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
86 err = PTR_ERR(vport->ingress.offloads.modify_metadata);
94 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
95 flow_act.fg = vport->ingress.offloads.metadata_allmatch_grp;
96 vport->ingress.offloads.modify_metadata_rule =
99 if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
100 err = PTR_ERR(vport->ingress.offloads
[all...]
egress_ofld.c:11  if (!vport->egress.offloads.fwd_rule)
14 mlx5_del_flow_rules(vport->egress.offloads.fwd_rule);
15 vport->egress.offloads.fwd_rule = NULL;
21 xa_load(&vport->egress.offloads.bounce_rules, rule_index);
27 xa_erase(&vport->egress.offloads.bounce_rules, rule_index);
35 xa_for_each(&vport->egress.offloads.bounce_rules, i, bounce_rule) {
37 xa_erase(&vport->egress.offloads.bounce_rules, i);
56 vport->egress.offloads.fwd_rule =
59 if (IS_ERR(vport->egress.offloads.fwd_rule)) {
60 err = PTR_ERR(vport->egress.offloads
[all...]
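
The egress_ofld.c hits above keep per-vport bounce rules in an XArray keyed by rule index: xa_load() fetches one slot, xa_erase() drops it, xa_for_each() flushes the rest. A minimal sketch of that pattern in isolation, assuming a standalone table and hypothetical helper names; the XArray calls and mlx5_del_flow_rules() are the real kernel APIs:

#include <linux/xarray.h>
#include <linux/mlx5/fs.h>

/* Hypothetical standalone table: index -> struct mlx5_flow_handle * */
static DEFINE_XARRAY(bounce_rules);

static void drop_bounce_rule(unsigned long rule_index)
{
        struct mlx5_flow_handle *rule;

        rule = xa_load(&bounce_rules, rule_index);      /* look up by index */
        if (!rule)
                return;
        mlx5_del_flow_rules(rule);                      /* tear down the flow rule */
        xa_erase(&bounce_rules, rule_index);            /* release the slot */
}

static void drop_all_bounce_rules(void)
{
        struct mlx5_flow_handle *rule;
        unsigned long i;

        xa_for_each(&bounce_rules, i, rule) {           /* visit every populated index */
                mlx5_del_flow_rules(rule);
                xa_erase(&bounce_rules, i);
        }
}
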
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/esw/
vporttbl.c:27  if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
73 hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
90 mutex_lock(&esw->fdb_table.offloads.vports.lock);
119 hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
121 mutex_unlock(&esw->fdb_table.offloads.vports.lock);
127 mutex_unlock(&esw->fdb_table.offloads.vports.lock);
138 mutex_lock(&esw->fdb_table.offloads.vports.lock);
149 mutex_unlock(&esw->fdb_table.offloads.vports.lock);
indir_table.c:293  hash_add(esw->fdb_table.offloads.indir->table, &e->hlist,
316 hash_for_each_possible(esw->fdb_table.offloads.indir->table, e, hlist, key)
330 mutex_lock(&esw->fdb_table.offloads.indir->lock);
348 mutex_unlock(&esw->fdb_table.offloads.indir->lock);
352 mutex_unlock(&esw->fdb_table.offloads.indir->lock);
361 mutex_lock(&esw->fdb_table.offloads.indir->lock);
381 mutex_unlock(&esw->fdb_table.offloads.indir->lock);
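
Both vporttbl.c and indir_table.c above follow the same shape: a hashtable guarded by a mutex, probed with hash_for_each_possible() and extended with hash_add() when the key is absent. A condensed lookup-or-create sketch of that shape, with a hypothetical entry type and table; only the <linux/hashtable.h> and mutex primitives are the real APIs:

#include <linux/hashtable.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct vport_entry {                            /* hypothetical per-key entry */
        struct hlist_node hlist;
        unsigned long key;
};

static DEFINE_HASHTABLE(vport_tbl, 8);          /* 256-bucket table */
static DEFINE_MUTEX(vport_tbl_lock);

static struct vport_entry *vport_entry_get(unsigned long key)
{
        struct vport_entry *e;

        mutex_lock(&vport_tbl_lock);
        hash_for_each_possible(vport_tbl, e, hlist, key)        /* walk the bucket */
                if (e->key == key)
                        goto out;                               /* already present */

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e) {
                e->key = key;
                hash_add(vport_tbl, &e->hlist, key);            /* insert new entry */
        }
out:
        mutex_unlock(&vport_tbl_lock);
        return e;
}
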
ipsec_fs.c:153  xa_for_each(&esw->offloads.vport_reps, i, rep) {
devlink_port.c:38  controller_num = dev->priv.eswitch->offloads.host_number + 1;
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
eswitch_offloads.c:57  xa_for_each(&((esw)->offloads.vport_reps), i, rep)
79 return xa_load(&esw->offloads.vport_reps, vport_num);
434 if (esw->offloads.ft_ipsec_tx_pol &&
451 if (!esw->offloads.ft_ipsec_tx_pol)
496 dest[dest_idx].ft = esw->offloads.ft_ipsec_tx_pol;
770 atomic64_inc(&esw->offloads.num_flows);
856 atomic64_inc(&esw->offloads.num_flows);
891 atomic64_dec(&esw->offloads.num_flows);
993 on_esw == from_esw && on_esw->offloads.ft_ipsec_tx_pol) {
994 dest.ft = on_esw->offloads
2013 struct mlx5_esw_offload *offloads = &esw->offloads; local
2224 struct mlx5_esw_offload *offloads = &esw->offloads; local
[all...]
eswitch_offloads_termtbl.c:122  mutex_lock(&esw->offloads.termtbl_mutex);
124 hash_for_each_possible(esw->offloads.termtbl_tbl, tt,
151 hash_add(esw->offloads.termtbl_tbl, &tt->termtbl_hlist, hash_key);
154 mutex_unlock(&esw->offloads.termtbl_mutex);
158 mutex_unlock(&esw->offloads.termtbl_mutex);
166 mutex_lock(&esw->offloads.termtbl_mutex);
169 mutex_unlock(&esw->offloads.termtbl_mutex);
eswitch.c:1458  * mlx5_eswitch_enable_locked() Enables eswitch in either legacy or offloads
1876 mutex_init(&esw->offloads.encap_tbl_lock);
1877 hash_init(esw->offloads.encap_tbl);
1878 mutex_init(&esw->offloads.decap_tbl_lock);
1879 hash_init(esw->offloads.decap_tbl);
1880 mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
1881 atomic64_set(&esw->offloads.num_flows, 0);
1882 ida_init(&esw->offloads.vport_metadata_ida);
1883 xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
1890 esw->offloads
[all...]
eswitch.h:87  ((esw)->fdb_table.offloads.esw_chains_priv)
123 } offloads; member in struct:vport_ingress
147 } offloads; member in union:vport_egress::__anon1486
267 } offloads; member in union:mlx5_eswitch_fdb::__anon1490
373 struct mlx5_esw_offload offloads; member in struct:mlx5_eswitch
862 return esw->fdb_table.offloads.slow_fdb;
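
The eswitch.h hits show that "offloads" names four different members in this driver (eswitch.h lines 123, 147, 267 and 373 above). An illustration-only skeleton of those four, with field lists trimmed to what the matches in this search touch and types reduced to opaque pointers; the real definitions in eswitch.h carry far more state:

#include <linux/xarray.h>
#include <linux/atomic.h>

struct vport_ingress_sketch {
        struct {
                void *modify_metadata_rule;     /* ingress_ofld.c:43 */
        } offloads;
};

struct vport_egress_sketch {
        struct {
                void *fwd_rule;                 /* egress_ofld.c:11 */
                struct xarray bounce_rules;     /* egress_ofld.c:21 */
        } offloads;
};

struct mlx5_eswitch_sketch {
        struct {
                struct {
                        void *slow_fdb;         /* eswitch.h:862 */
                } offloads;
        } fdb_table;
        struct {                                /* stands in for struct mlx5_esw_offload */
                struct xarray vport_reps;       /* eswitch_offloads.c:79 */
                atomic64_t num_flows;           /* eswitch_offloads.c:770 */
        } offloads;
};
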
/linux-master/drivers/net/ethernet/marvell/octeon_ep/
octep_ctrl_net.h:117  /* offloads */
119 /* supported rx offloads OCTEP_RX_OFFLOAD_* */
121 /* supported tx offloads OCTEP_TX_OFFLOAD_* */
125 /* extra offloads */
129 /* get/set offloads */
134 struct octep_ctrl_net_offloads offloads; member in struct:octep_ctrl_net_h2f_req_cmd_offloads
146 struct octep_ctrl_net_h2f_req_cmd_offloads offloads; member in union:octep_ctrl_net_h2f_req::__anon1407
204 struct octep_ctrl_net_offloads offloads; member in union:octep_ctrl_net_h2f_resp::__anon1409
408 * octep_ctrl_net_set_offloads() - Set offloads in firmware.
412 * @offloads
[all...]
octep_ctrl_net.c:419  struct octep_ctrl_net_offloads *offloads,
428 req->offloads.cmd = OCTEP_CTRL_NET_CMD_SET;
429 req->offloads.offloads = *offloads;
418 octep_ctrl_net_set_offloads(struct octep_device *oct, int vfid, struct octep_ctrl_net_offloads *offloads, bool wait_for_response) argument
octep_main.c:1107  struct octep_ctrl_net_offloads offloads = { 0 }; local
1116 offloads.tx_offloads |= OCTEP_TX_OFFLOAD_TSO;
1119 offloads.tx_offloads |= OCTEP_TX_OFFLOAD_TSO;
1122 offloads.tx_offloads |= OCTEP_TX_OFFLOAD_CKSUM;
1125 offloads.tx_offloads |= OCTEP_TX_OFFLOAD_CKSUM;
1128 offloads.rx_offloads |= OCTEP_RX_OFFLOAD_CKSUM;
1132 &offloads,
octep_pfvf_mbox.c:224  struct octep_ctrl_net_offloads offloads = { local
230 err = octep_ctrl_net_set_offloads(oct, vf_id, &offloads, true);
233 dev_err(&oct->pdev->dev, "Set VF offloads failed via host control Mbox\n");
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
tc_tun_encap.c:412  mutex_lock(&esw->offloads.encap_tbl_lock);
429 mutex_unlock(&esw->offloads.encap_tbl_lock);
487 if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
491 mutex_unlock(&esw->offloads.encap_tbl_lock);
500 lockdep_assert_held(&esw->offloads.encap_tbl_lock);
513 if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
516 mutex_unlock(&esw->offloads.decap_tbl_lock);
544 mutex_lock(&esw->offloads.encap_tbl_lock);
548 mutex_unlock(&esw->offloads.encap_tbl_lock);
553 mutex_unlock(&esw->offloads
[all...]
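
The tc_tun_encap.c hits pair each encap/decap entry's refcount with the corresponding table mutex: refcount_dec_and_mutex_lock() takes the lock and returns true only when the count drops to zero, so the entry is unhashed and freed exactly once. A minimal sketch of that release path, assuming a hypothetical entry type and lock; the primitive is the real <linux/refcount.h> helper:

#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct encap_entry {                    /* hypothetical table entry */
        refcount_t refcnt;
};

static DEFINE_MUTEX(encap_tbl_lock);

static void encap_entry_put(struct encap_entry *e)
{
        /* Only the final put acquires the mutex and returns true */
        if (!refcount_dec_and_mutex_lock(&e->refcnt, &encap_tbl_lock))
                return;

        /* ... last user: unhash the entry from its table here ... */
        mutex_unlock(&encap_tbl_lock);
        kfree(e);
}
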
/linux-master/net/ipv4/
fou_core.c:235  const struct net_offload __rcu **offloads; local
251 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
252 ops = rcu_dereference(offloads[proto]);
265 const struct net_offload __rcu **offloads; local
270 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
271 ops = rcu_dereference(offloads[proto]);
310 const struct net_offload __rcu **offloads; local
434 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
435 ops = rcu_dereference(offloads[proto]);
451 const struct net_offload __rcu **offloads; local
[all...]
udp_offload.c:157  const struct net_offload __rcu **offloads; local
172 offloads = is_ipv6 ? inet6_offloads : inet_offloads;
173 ops = rcu_dereference(offloads[skb->inner_ipproto]);
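
The fou_core.c and udp_offload.c hits all perform the same dispatch step: choose the IPv4 or IPv6 per-protocol offload table, index it by the (inner) IP protocol number, and rcu_dereference() the entry to reach the GRO/GSO callbacks. A sketch of just that step; the helper name is made up, while inet_offloads[], inet6_offloads[] and struct net_offload are the real declarations from include/net/protocol.h:

#include <linux/types.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>

/* Caller must hold rcu_read_lock(); returns NULL for unhandled protocols */
static const struct net_offload *lookup_inner_offload(bool is_ipv6, u8 proto)
{
        const struct net_offload __rcu **offloads;

        /* Both tables are indexed by the IPPROTO_* value of the header */
        offloads = is_ipv6 ? inet6_offloads : inet_offloads;

        return rcu_dereference(offloads[proto]);
}
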
/linux-master/kernel/bpf/
offload.c:115  list_del_init(&offload->offloads);
141 list_del_init(&offmap->offloads);
169 list_for_each_entry(offload, &ondev->progs, offloads)
173 list_for_each_entry(offmap, &ondev->maps, offloads)
177 list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
179 list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
220 list_add_tail(&offload->offloads, &ondev->progs);
389 list_del_init(&prog->aux->offload->offloads);
549 list_add_tail(&offmap->offloads, &ondev->maps);
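
In kernel/bpf/offload.c the "offloads" hits are list links: each offloaded program or map embeds a list_head named offloads that threads it onto its bound device's progs/maps list, added with list_add_tail() and unwound with the _safe iterator plus list_del_init(). A stripped-down sketch of that bookkeeping with hypothetical struct names; the list primitives are the stock <linux/list.h> ones:

#include <linux/list.h>

struct offload_dev {
        struct list_head progs;         /* all programs bound to this device */
};

struct prog_offload {
        struct list_head offloads;      /* link on offload_dev::progs */
};

/* dev->progs must already be set up with INIT_LIST_HEAD() */
static void bind_prog(struct offload_dev *dev, struct prog_offload *off)
{
        list_add_tail(&off->offloads, &dev->progs);
}

static void unbind_all(struct offload_dev *dev)
{
        struct prog_offload *off, *tmp;

        /* _safe variant because each entry is unlinked while iterating */
        list_for_each_entry_safe(off, tmp, &dev->progs, offloads)
                list_del_init(&off->offloads);
}
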
/linux-master/drivers/net/hyperv/
rndis_filter.c:1353  struct ndis_offload_params offloads; local
1363 memset(&offloads, 0, sizeof(struct ndis_offload_params));
1366 offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
1382 offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1385 offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1393 offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1401 offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1406 offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1414 offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1423 offloads
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/tc/
int_port.c:93  flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
96 mlx5_core_warn(esw->dev, "ft offloads: Failed to add internal vport rx rule err %ld\n",
182 ctx = esw->offloads.reg_c0_obj_pool;
233 ctx = esw->offloads.reg_c0_obj_pool;
sample.c:525  err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
577 mapping_remove(esw->offloads.reg_c0_obj_pool, obj_id);
607 mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr.restore_obj_id);
/linux-master/drivers/net/ethernet/intel/ice/
ice_virtchnl.c:380  /* VLAN offloads based on current device configuration */
385 * and offloads being based on the inner VLAN or the
747 * RSS offloads
2206 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
2827 supported_caps = &caps->offloads.stripping_support;
2833 supported_caps = &caps->offloads.insertion_support;
2839 caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2840 caps->offloads.ethertype_match =
2853 supported_caps = &caps->offloads.stripping_support;
2864 supported_caps = &caps->offloads
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/rep/
tc.c:101  mutex_lock(&esw->offloads.encap_tbl_lock);
127 mutex_unlock(&esw->offloads.encap_tbl_lock);
723 mapping_ctx = esw->offloads.reg_c0_obj_pool;
/linux-master/drivers/net/ethernet/intel/idpf/
idpf_lib.c:722  netdev_features_t offloads = 0; local
805 offloads |= NETIF_F_GRO_HW;
806 /* advertise to stack only if offloads for encapsulated packets is
811 offloads |= NETIF_F_GSO_UDP_TUNNEL |
826 offloads |= NETIF_F_TSO_MANGLEID;
829 offloads |= NETIF_F_LOOPBACK;
832 netdev->hw_features |= dflt_features | offloads;
833 netdev->hw_enc_features |= dflt_features | offloads;
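
The idpf_lib.c hits accumulate optional capabilities into a local netdev_features_t mask and then OR it into both hw_features and hw_enc_features. A reduced sketch of that flow; the boolean capability checks stand in for the driver's real capability queries:

#include <linux/netdevice.h>

static void advertise_offloads(struct net_device *netdev,
                               bool has_gro_hw, bool has_loopback)
{
        netdev_features_t offloads = 0;

        if (has_gro_hw)
                offloads |= NETIF_F_GRO_HW;     /* hardware GRO */
        if (has_loopback)
                offloads |= NETIF_F_LOOPBACK;   /* internal loopback */

        netdev->hw_features |= offloads;        /* user-toggleable feature set */
        netdev->hw_enc_features |= offloads;    /* also for encapsulated traffic */
}
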
idpf_singleq_txrx.c:7  * idpf_tx_singleq_csum - Enable tx checksum offloads
120 /* Enable IP checksum offloads */
146 /* Enable L4 checksum offloads */
149 /* enable checksum offloads */
183 * @offloads: pointer to struct that holds offload parameters
191 struct idpf_tx_offload_params *offloads)
193 u32 offsets = offloads->hdr_offsets;
197 u64 td_cmd = offloads->td_cmd;
189 idpf_tx_singleq_map(struct idpf_queue *tx_q, struct idpf_tx_buf *first, struct idpf_tx_offload_params *offloads) argument

