Lines matching refs:dp (net/openvswitch/datapath.c)

127 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
131 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
141 const char *ovs_dp_name(const struct datapath *dp)
143 struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
147 static int get_dpifindex(const struct datapath *dp)
154 local = ovs_vport_rcu(dp, OVSP_LOCAL);
167 struct datapath *dp = container_of(rcu, struct datapath, rcu);
169 ovs_flow_tbl_destroy(&dp->table);
170 free_percpu(dp->stats_percpu);
171 kfree(dp->ports);
172 ovs_meters_exit(dp);
173 kfree(rcu_dereference_raw(dp->upcall_portids));
174 kfree(dp);
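
The destroy_dp_rcu() lines above (167-174) show the kernel's deferred-teardown idiom: the RCU callback receives a pointer to the rcu_head embedded in struct datapath and recovers the enclosing object with container_of() before freeing its parts. A minimal userspace sketch of that pointer arithmetic, assuming a simplified rcu_head and skipping the real grace-period machinery:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Same definition the kernel uses: subtract the member's offset. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void (*func)(struct rcu_head *); };	/* stand-in */

struct datapath {
	int id;
	struct rcu_head rcu;	/* callback head embedded in the object */
};

/* Mirrors destroy_dp_rcu(): map the rcu_head back to its datapath. */
static void destroy_dp_cb(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	printf("freeing datapath %d\n", dp->id);
	free(dp);
}

int main(void)
{
	struct datapath *dp = calloc(1, sizeof(*dp));

	dp->id = 42;
	dp->rcu.func = destroy_dp_cb;
	dp->rcu.func(&dp->rcu);	/* call_rcu() would defer this in-kernel */
	return 0;
}
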
177 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
180 return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
184 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
189 head = vport_hash_bucket(dp, port_no);
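
vport_hash_bucket() at line 180 relies on DP_VPORT_HASH_BUCKETS being a power of two, so `port_no & (N - 1)` is a cheap substitute for `port_no % N`. A runnable sketch of the same bucket selection; the value 1024 appears to match the kernel's datapath.h definition, but treat it as an assumption here:

#include <stdint.h>
#include <stdio.h>

#define DP_VPORT_HASH_BUCKETS 1024	/* must remain a power of two */

static unsigned int vport_hash_bucket(uint16_t port_no)
{
	return port_no & (DP_VPORT_HASH_BUCKETS - 1);
}

int main(void)
{
	/* Port 1025 wraps into bucket 1; OVSP_LOCAL (0) lands in bucket 0. */
	printf("%u %u\n", vport_hash_bucket(1025), vport_hash_bucket(0));
	return 0;
}
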
205 struct datapath *dp = parms->dp;
206 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
248 struct datapath *dp = p->dp;
257 stats = this_cpu_ptr(dp->stats_percpu);
260 flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
268 if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
270 ovs_dp_get_upcall_portid(dp, smp_processor_id());
275 error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
293 error = ovs_execute_actions(dp, skb, sf_acts, key);
296 ovs_dp_name(dp), error);
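
Lines 248-296 are the receive fast path, ovs_dp_process_packet(): grab the per-CPU stats slot, look the flow up by the skb hash, and either queue a miss upcall to userspace (addressed to a per-CPU portid when OVS_DP_F_DISPATCH_UPCALL_PER_CPU is set) or run the cached actions. A stand-in control-flow sketch; the table, hash, and helpers below are simplified assumptions, not the kernel API:

#include <stdio.h>

struct sw_flow { const char *actions; };

/* Stand-in flow table: NULL means a lookup miss. */
static struct sw_flow *flow_lookup(unsigned int hash)
{
	static struct sw_flow cached = { .actions = "output:2" };

	return (hash % 2 == 0) ? &cached : NULL;
}

static void process_packet(unsigned int hash, unsigned int cpu)
{
	struct sw_flow *flow = flow_lookup(hash);

	if (!flow) {
		/* Miss: hand the packet to userspace, as ovs_dp_upcall()
		 * does for OVS_PACKET_CMD_MISS. */
		printf("miss: upcall from cpu %u\n", cpu);
		return;
	}
	/* Hit: execute the cached action list (ovs_execute_actions()). */
	printf("hit: %s\n", flow->actions);
}

int main(void)
{
	process_packet(4, 0);	/* hit */
	process_packet(3, 1);	/* miss -> upcall */
	return 0;
}
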
309 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
318 trace_ovs_dp_upcall(dp, skb, key, upcall_info);
326 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
328 err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
337 stats = this_cpu_ptr(dp->stats_percpu);
346 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
377 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
421 static void pad_packet(struct datapath *dp, struct sk_buff *skb)
423 if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
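
pad_packet() (lines 421-423) zero-pads the packet up to the netlink attribute alignment unless userspace negotiated OVS_DP_F_UNALIGNED. NLA_ALIGNTO is 4 in the uapi headers; a sketch of the arithmetic with the skb reduced to a plain length:

#include <stdio.h>

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))

int main(void)
{
	unsigned int len = 61;
	unsigned int pad = NLA_ALIGN(len) - len;	/* 3 zero bytes */

	printf("pad %u bytes -> %u total\n", pad, len + pad);
	return 0;
}
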
431 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
445 dp_ifindex = get_dpifindex(dp);
475 if (dp->user_features & OVS_DP_F_UNALIGNED)
575 pad_packet(dp, user_skb);
579 err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
599 struct datapath *dp;
657 dp = get_dp_rcu(net, ovs_header->dp_ifindex);
659 if (!dp)
662 input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
664 input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
674 err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
722 static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
729 stats->n_flows = ovs_flow_tbl_count(&dp->table);
730 mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
739 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
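
get_dp_stats() (lines 722-739) sums per-CPU hit/miss/lost counters into one report; the kernel wraps each per-CPU read in u64_stats_fetch_begin()/u64_stats_fetch_retry(), which this sketch omits. Counter names follow struct dp_stats_percpu; the fixed CPU count is an assumption:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct dp_stats_percpu { uint64_t n_hit, n_missed, n_lost; };

int main(void)
{
	struct dp_stats_percpu pcpu[NR_CPUS] = {
		{ 10, 2, 0 }, { 7, 1, 0 }, { 0, 0, 1 }, { 3, 0, 0 },
	};
	struct dp_stats_percpu total = { 0 };
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		total.n_hit += pcpu[i].n_hit;
		total.n_missed += pcpu[i].n_missed;
		total.n_lost += pcpu[i].n_lost;
	}
	printf("hit %llu missed %llu lost %llu\n",
	       (unsigned long long)total.n_hit,
	       (unsigned long long)total.n_missed,
	       (unsigned long long)total.n_lost);
	return 0;
}
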
973 struct datapath *dp;
1038 dp = get_dp(net, ovs_header->dp_ifindex);
1039 if (unlikely(!dp)) {
1046 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
1048 flow = ovs_flow_tbl_lookup(&dp->table, key);
1053 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
1088 flow = ovs_flow_tbl_lookup_exact(&dp->table,
1220 struct datapath *dp;
1252 dp = get_dp(net, ovs_header->dp_ifindex);
1253 if (unlikely(!dp)) {
1259 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
1261 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1322 struct datapath *dp;
1344 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1345 if (!dp) {
1351 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1353 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1381 struct datapath *dp;
1399 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1400 if (unlikely(!dp)) {
1406 err = ovs_flow_tbl_flush(&dp->table);
1411 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1413 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1419 ovs_flow_tbl_remove(&dp->table, flow);
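
The flow NEW/SET/GET/DEL handlers above (lines 1046-1419) share one lookup order: try the unique flow identifier (UFID) first, and fall back to an exact key match only when no UFID was supplied; DEL with neither flushes the whole table. A reduced sketch of that dispatch, with both lookup helpers as stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct sw_flow { int id; };

static struct sw_flow *lookup_ufid(void)  { static struct sw_flow f = { 1 }; return &f; }
static struct sw_flow *lookup_exact(void) { static struct sw_flow f = { 2 }; return &f; }

static struct sw_flow *flow_lookup(bool ufid_present)
{
	/* UFID wins when present; exact key match is the fallback. */
	return ufid_present ? lookup_ufid() : lookup_exact();
}

int main(void)
{
	printf("%d %d\n", flow_lookup(true)->id, flow_lookup(false)->id);
	return 0;
}
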
1458 struct datapath *dp;
1469 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
1470 if (!dp) {
1475 ti = rcu_dereference(dp->table.ti);
1564 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1570 struct dp_nlsk_pids *pids = ovsl_dereference(dp->upcall_portids);
1578 ovs_header->dp_ifindex = get_dpifindex(dp);
1580 err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1584 get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1594 if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1598 ovs_flow_tbl_masks_cache_size(&dp->table)))
1601 if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU && pids) {
1626 struct datapath *dp;
1629 dp = get_dp(net, ovs_header->dp_ifindex);
1634 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1636 return dp ? dp : ERR_PTR(-ENODEV);
1642 struct datapath *dp;
1644 dp = lookup_datapath(sock_net(skb->sk), genl_info_userhdr(info),
1646 if (IS_ERR(dp))
1650 ovs_dp_name(dp));
1651 dp->user_features = 0;
1654 static int ovs_dp_set_upcall_portids(struct datapath *dp,
1662 old = ovsl_dereference(dp->upcall_portids);
1672 rcu_assign_pointer(dp->upcall_portids, dp_nlsk_pids);
1679 u32 ovs_dp_get_upcall_portid(const struct datapath *dp, uint32_t cpu_id)
1683 dp_nlsk_pids = rcu_dereference(dp->upcall_portids);
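
ovs_dp_get_upcall_portid() (lines 1679-1683) picks the netlink socket for a miss upcall when per-CPU dispatch is enabled: one portid per CPU, with a modulo wraparound if the CPU id exceeds the registered count so packets are not dropped on a mismatch. A sketch of that selection; the plain pids array is a stand-in for struct dp_nlsk_pids:

#include <stdint.h>
#include <stdio.h>

static uint32_t upcall_portid(const uint32_t *pids, uint32_t n_pids,
			      uint32_t cpu_id)
{
	if (!n_pids)
		return 0;	/* no sockets registered */
	/* Direct hit when in range, wrap around otherwise. */
	return pids[cpu_id < n_pids ? cpu_id : cpu_id % n_pids];
}

int main(void)
{
	uint32_t pids[] = { 100, 101 };

	/* CPU 3 with only two registered sockets wraps to pids[1]. */
	printf("%u %u\n", upcall_portid(pids, 2, 0), upcall_portid(pids, 2, 3));
	return 0;
}
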
1706 static int ovs_dp_change(struct datapath *dp, struct nlattr *a[])
1708 u32 user_features = 0, old_features = dp->user_features;
1731 err = ovs_flow_tbl_masks_cache_resize(&dp->table, cache_size);
1736 dp->user_features = user_features;
1738 if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU &&
1741 err = ovs_dp_set_upcall_portids(dp,
1747 if ((dp->user_features & OVS_DP_F_TC_RECIRC_SHARING) &&
1750 else if (!(dp->user_features & OVS_DP_F_TC_RECIRC_SHARING) &&
1757 static int ovs_dp_stats_init(struct datapath *dp)
1759 dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
1760 if (!dp->stats_percpu)
1766 static int ovs_dp_vport_init(struct datapath *dp)
1770 dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
1773 if (!dp->ports)
1777 INIT_HLIST_HEAD(&dp->ports[i]);
1787 struct datapath *dp;
1801 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1802 if (dp == NULL)
1805 ovs_dp_set_net(dp, sock_net(skb->sk));
1808 err = ovs_flow_tbl_init(&dp->table);
1812 err = ovs_dp_stats_init(dp);
1816 err = ovs_dp_vport_init(dp);
1820 err = ovs_meters_init(dp);
1828 parms.dp = dp;
1837 err = ovs_dp_change(dp, a);
1859 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1863 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1864 list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1872 kfree(rcu_dereference_raw(dp->upcall_portids));
1875 ovs_meters_exit(dp);
1877 kfree(dp->ports);
1879 free_percpu(dp->stats_percpu);
1881 ovs_flow_tbl_destroy(&dp->table);
1883 kfree(dp);
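
The creation path (lines 1801-1883) initializes the datapath in stages (flow table, per-CPU stats, vport table, meters), and the error labels above unwind them in reverse order, freeing only what was set up. A minimal sketch of that goto-based unwind, with the init steps reduced to stubs:

#include <stdio.h>

static int init_table(void) { return 0; }
static int init_stats(void) { return -1; }	/* pretend this step fails */
static void destroy_table(void) { puts("undo table"); }

static int dp_new(void)
{
	int err;

	err = init_table();
	if (err)
		goto err_out;
	err = init_stats();
	if (err)
		goto err_destroy_table;	/* unwind only completed steps */
	return 0;

err_destroy_table:
	destroy_table();
err_out:
	return err;
}

int main(void)
{
	printf("dp_new() = %d\n", dp_new());
	return 0;
}
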
1891 static void __dp_destroy(struct datapath *dp)
1893 struct flow_table *table = &dp->table;
1896 if (dp->user_features & OVS_DP_F_TC_RECIRC_SHARING)
1903 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1908 list_del_rcu(&dp->list_node);
1913 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1916 * such as dp, ports and tables. That may avoid some issues
1923 call_rcu(&dp->rcu, destroy_dp_rcu);
1929 struct datapath *dp;
1937 dp = lookup_datapath(sock_net(skb->sk), genl_info_userhdr(info),
1939 err = PTR_ERR(dp);
1940 if (IS_ERR(dp))
1943 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1947 __dp_destroy(dp);
1963 struct datapath *dp;
1971 dp = lookup_datapath(sock_net(skb->sk), genl_info_userhdr(info),
1973 err = PTR_ERR(dp);
1974 if (IS_ERR(dp))
1977 err = ovs_dp_change(dp, info->attrs);
1981 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1999 struct datapath *dp;
2007 dp = lookup_datapath(sock_net(skb->sk), genl_info_userhdr(info),
2009 if (IS_ERR(dp)) {
2010 err = PTR_ERR(dp);
2013 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
2029 struct datapath *dp;
2034 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2036 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
2112 ovs_header->dp_ifindex = get_dpifindex(vport->dp);
2182 struct datapath *dp;
2192 ovs_header->dp_ifindex != get_dpifindex(vport->dp))
2201 dp = get_dp(net, ovs_header->dp_ifindex);
2202 if (!dp)
2205 vport = ovs_vport_ovsl_rcu(dp, port_no);
2214 static unsigned int ovs_get_max_headroom(struct datapath *dp)
2222 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
2235 static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
2240 dp->max_headroom = new_headroom;
2242 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
2255 struct datapath *dp;
2280 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
2282 if (!dp)
2286 vport = ovs_vport_ovsl(dp, port_no);
2296 vport = ovs_vport_ovsl(dp, port_no);
2304 parms.dp = dp;
2324 if (new_headroom > dp->max_headroom)
2325 ovs_update_headroom(dp, new_headroom);
2327 netdev_set_rx_headroom(vport->dev, dp->max_headroom);
2399 struct datapath *dp;
2424 /* the vport deletion may trigger dp headroom update */
2425 dp = vport->dp;
2426 if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
2433 new_headroom = ovs_get_max_headroom(dp);
2435 if (new_headroom < dp->max_headroom)
2436 ovs_update_headroom(dp, new_headroom);
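
Lines 2214-2436 maintain dp->max_headroom as the maximum forwarding headroom across all vports: adding a device with more headroom raises it immediately, while deleting the device that defined the maximum forces a full recompute. A sketch of that recompute with the vport hash table reduced to an array:

#include <stdio.h>

static unsigned int max_headroom(const unsigned int *hr, int n)
{
	unsigned int max = 0;
	int i;

	for (i = 0; i < n; i++)
		if (hr[i] > max)
			max = hr[i];
	return max;
}

int main(void)
{
	unsigned int hr[] = { 16, 64, 32 };
	unsigned int cur = max_headroom(hr, 3);	/* 64 */

	hr[1] = 0;	/* the biggest vport goes away */
	printf("%u -> %u\n", cur, max_headroom(hr, 3));
	return 0;
}
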
2483 struct datapath *dp;
2488 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
2489 if (!dp) {
2497 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2525 struct datapath *dp;
2529 list_for_each_entry(dp, &ovs_net->dps, list_node)
2530 ovs_flow_masks_rebalance(&dp->table);
2650 struct datapath *dp;
2652 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2658 hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
2671 struct datapath *dp, *dp_next;
2681 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2682 __dp_destroy(dp);