Lines matching defs:flow (Netfilter flow table module, net/netfilter/nf_flow_table_core.c)

21 flow_offload_fill_dir(struct flow_offload *flow,
24 struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
25 struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;
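
A sketch of the helper these fragments belong to, flow_offload_fill_dir(), reconstructed around lines 21-25; the switch bodies and the nf_conntrack_tuple field names (src.l3num, src.u3, dst.protonum, src.u.tcp.port) are assumptions based on the standard conntrack tuple layout, not verbatim source:

static void flow_offload_fill_dir(struct flow_offload *flow,
                                  enum flow_offload_tuple_dir dir)
{
        struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
        struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

        ft->dir = dir;

        switch (ctt->src.l3num) {
        case NFPROTO_IPV4:
                ft->src_v4 = ctt->src.u3.in;            /* IPv4 endpoints */
                ft->dst_v4 = ctt->dst.u3.in;
                break;
        case NFPROTO_IPV6:
                ft->src_v6 = ctt->src.u3.in6;           /* IPv6 endpoints */
                ft->dst_v6 = ctt->dst.u3.in6;
                break;
        }

        ft->l3proto = ctt->src.l3num;
        ft->l4proto = ctt->dst.protonum;

        switch (ctt->dst.protonum) {
        case IPPROTO_TCP:
        case IPPROTO_UDP:
                /* TCP and UDP port fields share one layout in the union */
                ft->src_port = ctt->src.u.tcp.port;
                ft->dst_port = ctt->dst.u.tcp.port;
                break;
        }
}
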
54 struct flow_offload *flow;
59 flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
60 if (!flow)
64 flow->ct = ct;
66 flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
67 flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);
70 __set_bit(NF_FLOW_SNAT, &flow->flags);
72 __set_bit(NF_FLOW_DNAT, &flow->flags);
74 return flow;
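
Lines 54-74 fall inside the flow allocator (flow_offload_alloc() in mainline; its own signature defines no symbol named flow, so it never matches above). A sketch with the dying-conntrack check and the conntrack reference take filled in as assumptions:

struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
        struct flow_offload *flow;

        if (unlikely(nf_ct_is_dying(ct)))
                return NULL;

        flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
        if (!flow)
                return NULL;

        refcount_inc(&ct->ct_general.use);      /* hold ct; API varies by version */
        flow->ct = ct;

        flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
        flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

        if (ct->status & IPS_SRC_NAT)           /* record NAT so the datapath */
                __set_bit(NF_FLOW_SNAT, &flow->flags);
        if (ct->status & IPS_DST_NAT)
                __set_bit(NF_FLOW_DNAT, &flow->flags);

        return flow;
}
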
96 static int flow_offload_fill_route(struct flow_offload *flow,
100 struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
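
flow_offload_fill_route() is only barely captured above, so here is a condensed sketch that keeps just the dst bookkeeping and elides the MTU, encap, and hardware-address handling; the nf_flow_route field names (tuple[dir].dst, in.ifindex, xmit_type) are assumptions. The point it illustrates: only the NEIGH and XFRM transmit types keep a dst reference, which is exactly what nft_flow_dst_release() (lines 147-152 below) later drops.

static int flow_offload_fill_route(struct flow_offload *flow,
                                   struct nf_flow_route *route,
                                   enum flow_offload_tuple_dir dir)
{
        struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
        struct dst_entry *dst = route->tuple[dir].dst;  /* held by caller */

        flow_tuple->iifidx = route->tuple[dir].in.ifindex;
        flow_tuple->xmit_type = route->tuple[dir].xmit_type;

        switch (flow_tuple->xmit_type) {
        case FLOW_OFFLOAD_XMIT_NEIGH:
        case FLOW_OFFLOAD_XMIT_XFRM:
                flow_tuple->dst_cache = dst;    /* keep the reference */
                break;
        default:
                dst_release(dst);       /* direct xmit caches ifindex/hwaddr instead */
                break;
        }

        return 0;
}
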
147 static void nft_flow_dst_release(struct flow_offload *flow,
150 if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
151 flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
152 dst_release(flow->tuplehash[dir].tuple.dst_cache);
155 void flow_offload_route_init(struct flow_offload *flow,
158 flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
159 flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
160 flow->type = NF_FLOW_OFFLOAD_ROUTE;
202 static void flow_offload_route_release(struct flow_offload *flow)
204 nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
205 nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
208 void flow_offload_free(struct flow_offload *flow)
210 switch (flow->type) {
212 flow_offload_route_release(flow);
217 nf_ct_put(flow->ct);
218 kfree_rcu(flow, rcu_head);
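
flow_offload_free() pieced together from lines 208-218; the NF_FLOW_OFFLOAD_ROUTE case label is an assumption, paired with the flow->type assignment at line 160:

void flow_offload_free(struct flow_offload *flow)
{
        switch (flow->type) {
        case NF_FLOW_OFFLOAD_ROUTE:
                flow_offload_route_release(flow);       /* drop both dst refs */
                break;
        default:
                break;
        }

        nf_ct_put(flow->ct);            /* release the conntrack reference */
        kfree_rcu(flow, rcu_head);      /* defer the free past RCU readers */
}
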
256 unsigned long flow_offload_get_timeout(struct flow_offload *flow)
259 struct net *net = nf_ct_net(flow->ct);
260 int l4num = nf_ct_protonum(flow->ct);
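
A sketch of flow_offload_get_timeout() around lines 256-260; the NF_FLOW_TIMEOUT default and the per-protocol offload_timeout sysctls reached through nf_tcp_pernet()/nf_udp_pernet() are assumptions:

unsigned long flow_offload_get_timeout(struct flow_offload *flow)
{
        unsigned long timeout = NF_FLOW_TIMEOUT;        /* fallback default */
        struct net *net = nf_ct_net(flow->ct);
        int l4num = nf_ct_protonum(flow->ct);

        if (l4num == IPPROTO_TCP) {
                struct nf_tcp_net *tn = nf_tcp_pernet(net);

                timeout = tn->offload_timeout;
        } else if (l4num == IPPROTO_UDP) {
                struct nf_udp_net *tn = nf_udp_pernet(net);

                timeout = tn->offload_timeout;
        }

        return timeout;
}
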
275 int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
279 flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
282 &flow->tuplehash[0].node,
288 &flow->tuplehash[1].node,
292 &flow->tuplehash[0].node,
297 nf_ct_offload_timeout(flow->ct);
300 __set_bit(NF_FLOW_HW, &flow->flags);
301 nf_flow_offload_add(flow_table, flow);
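
A sketch of flow_offload_add() fitted to the tuplehash[0]/tuplehash[1] fragments at 282-292: both direction nodes go into the table's rhashtable, and the first insert is unwound if the second fails. The rhashtable calls, nf_flow_offload_rhash_params, and the nf_flowtable_hw_offload() guard are assumptions:

int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
        int err;

        flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);

        err = rhashtable_insert_fast(&flow_table->rhashtable,
                                     &flow->tuplehash[0].node,
                                     nf_flow_offload_rhash_params);
        if (err < 0)
                return err;

        err = rhashtable_insert_fast(&flow_table->rhashtable,
                                     &flow->tuplehash[1].node,
                                     nf_flow_offload_rhash_params);
        if (err < 0) {
                /* roll back the original-direction insert */
                rhashtable_remove_fast(&flow_table->rhashtable,
                                       &flow->tuplehash[0].node,
                                       nf_flow_offload_rhash_params);
                return err;
        }

        nf_ct_offload_timeout(flow->ct);        /* stretch the ct timeout */

        if (nf_flowtable_hw_offload(flow_table)) {
                __set_bit(NF_FLOW_HW, &flow->flags);
                nf_flow_offload_add(flow_table, flow);  /* queue hw offload work */
        }

        return 0;
}
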
309 struct flow_offload *flow, bool force)
313 timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
314 if (force || timeout - READ_ONCE(flow->timeout) > HZ)
315 WRITE_ONCE(flow->timeout, timeout);
322 nf_flow_offload_add(flow_table, flow);
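
The refresh path (presumably flow_offload_refresh()): the READ_ONCE/WRITE_ONCE pair at 314-315 skips the store unless the timeout moved by more than HZ or the caller forces it, keeping the per-packet hot path from dirtying the cacheline. The hw-offload guard before the nf_flow_offload_add() call at 322 is an assumption:

void flow_offload_refresh(struct nf_flowtable *flow_table,
                          struct flow_offload *flow, bool force)
{
        u32 timeout;

        timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
        if (force || timeout - READ_ONCE(flow->timeout) > HZ)
                WRITE_ONCE(flow->timeout, timeout);
        else
                return;

        if (likely(!nf_flowtable_hw_offload(flow_table)))
                return;

        nf_flow_offload_add(flow_table, flow);  /* re-push to hardware */
}
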
326 static inline bool nf_flow_has_expired(const struct flow_offload *flow)
328 return nf_flow_timeout_delta(flow->timeout) <= 0;
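
nf_flow_has_expired() leans on a signed delta so the comparison stays correct across u32 jiffies wraparound. The two definitions below (from nf_flow_table.h in mainline) are quoted from memory and should be treated as assumptions:

#define nf_flowtable_time_stamp (u32)jiffies

static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
{
        /* <= 0 after the cast means the deadline passed, even if the
         * u32 jiffies counter wrapped in between */
        return (__s32)(timeout - nf_flowtable_time_stamp);
}
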
332 struct flow_offload *flow)
335 &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
338 &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
340 flow_offload_free(flow);
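
The unlink helper at 332-340 (presumably flow_offload_del()) removes both direction nodes before freeing, mirroring the two inserts in flow_offload_add(); a sketch:

static void flow_offload_del(struct nf_flowtable *flow_table,
                             struct flow_offload *flow)
{
        rhashtable_remove_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
                               nf_flow_offload_rhash_params);
        rhashtable_remove_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
                               nf_flow_offload_rhash_params);
        flow_offload_free(flow);
}
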
343 void flow_offload_teardown(struct flow_offload *flow)
345 clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
346 set_bit(NF_FLOW_TEARDOWN, &flow->flags);
347 flow_offload_fixup_ct(flow->ct);
356 struct flow_offload *flow;
365 flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
366 if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
369 if (unlikely(nf_ct_is_dying(flow->ct)))
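
The lookup at 356-369 (presumably flow_offload_lookup()): the rhashtable_lookup() call and the recovery of dir from the matched entry are assumptions; the two bail-outs shown at 366 and 369 keep torn-down flows and dying conntracks invisible to the datapath:

struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
                    struct flow_offload_tuple *tuple)
{
        struct flow_offload_tuple_rhash *tuplehash;
        struct flow_offload *flow;
        int dir;

        tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
                                      nf_flow_offload_rhash_params);
        if (!tuplehash)
                return NULL;

        dir = tuplehash->tuple.dir;     /* which direction matched */
        flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
        if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
                return NULL;

        if (unlikely(nf_ct_is_dying(flow->ct)))
                return NULL;

        return tuplehash;
}
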
379 struct flow_offload *flow, void *data),
384 struct flow_offload *flow;
401 flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
403 iter(flow_table, flow, data);
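
The table walker behind 379-403 (presumably nf_flow_table_iterate()); the rhashtable walk boilerplate is an assumption. The container_of() on tuplehash[0] at 401 implies reply-direction nodes are skipped so each flow is visited exactly once:

static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
                      void (*iter)(struct nf_flowtable *flow_table,
                                   struct flow_offload *flow, void *data),
                      void *data)
{
        struct flow_offload_tuple_rhash *tuplehash;
        struct rhashtable_iter hti;
        struct flow_offload *flow;
        int err = 0;

        rhashtable_walk_enter(&flow_table->rhashtable, &hti);
        rhashtable_walk_start(&hti);

        while ((tuplehash = rhashtable_walk_next(&hti))) {
                if (IS_ERR(tuplehash)) {
                        if (PTR_ERR(tuplehash) != -EAGAIN) {
                                err = PTR_ERR(tuplehash);
                                break;
                        }
                        continue;       /* table resized under us, retry */
                }
                if (tuplehash->tuple.dir)
                        continue;       /* visit each flow once, via dir 0 */

                flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

                iter(flow_table, flow, data);
        }
        rhashtable_walk_stop(&hti);
        rhashtable_walk_exit(&hti);

        return err;
}
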
412 const struct flow_offload *flow)
414 return flow_table->type->gc && flow_table->type->gc(flow);
418 struct flow_offload *flow, void *data)
420 if (nf_flow_has_expired(flow) ||
421 nf_ct_is_dying(flow->ct) ||
422 nf_flow_custom_gc(flow_table, flow))
423 flow_offload_teardown(flow);
425 if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
426 if (test_bit(NF_FLOW_HW, &flow->flags)) {
427 if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
428 nf_flow_offload_del(flow_table, flow);
429 else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
430 flow_offload_del(flow_table, flow);
432 flow_offload_del(flow_table, flow);
434 } else if (test_bit(NF_FLOW_HW, &flow->flags)) {
435 nf_flow_offload_stats(flow_table, flow);
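
The gc step, assembled almost line-for-line from 418-435 (the function name is the main assumption): expiry, a dying conntrack, or the flowtable type's custom gc hook triggers teardown; torn-down hardware-offloaded flows go through a two-phase HW_DYING then HW_DEAD removal before the software entry is freed, while live hardware flows just get their counters synced:

static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
                                    struct flow_offload *flow, void *data)
{
        if (nf_flow_has_expired(flow) ||
            nf_ct_is_dying(flow->ct) ||
            nf_flow_custom_gc(flow_table, flow))
                flow_offload_teardown(flow);

        if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
                if (test_bit(NF_FLOW_HW, &flow->flags)) {
                        if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
                                nf_flow_offload_del(flow_table, flow);
                        else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
                                flow_offload_del(flow_table, flow);
                } else {
                        flow_offload_del(flow_table, flow);
                }
        } else if (test_bit(NF_FLOW_HW, &flow->flags)) {
                nf_flow_offload_stats(flow_table, flow);
        }
}
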
489 void nf_flow_snat_port(const struct flow_offload *flow,
501 new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
506 new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
515 void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
527 new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
532 new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
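
nf_flow_snat_port() rewrites the source port of original-direction packets (and the destination port of replies) with the port recorded in the opposite tuple, then fixes up the checksum; struct flow_ports and the nf_flow_nat_port() checksum helper are assumptions here. nf_flow_dnat_port() at 515-532 mirrors this with source and destination swapped. A sketch:

void nf_flow_snat_port(const struct flow_offload *flow,
                       struct sk_buff *skb, unsigned int thoff,
                       u8 protocol, enum flow_offload_tuple_dir dir)
{
        struct flow_ports *hdr;
        __be16 port, new_port;

        hdr = (void *)(skb_network_header(skb) + thoff);

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = hdr->source;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
                hdr->source = new_port;
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = hdr->dest;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
                hdr->dest = new_port;
                break;
        }

        nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
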
566 struct flow_offload *flow, void *data)
571 flow_offload_teardown(flow);
575 if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
576 (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
577 flow->tuplehash[1].tuple.iifidx == dev->ifindex))
578 flow_offload_teardown(flow);
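
The iterator callback behind 566-578, used for device-down and flowtable-free cleanup (the name nf_flow_table_do_cleanup is an assumption): a NULL device tears down every flow, otherwise only flows in the device's netns where either direction ingresses through it:

static void nf_flow_table_do_cleanup(struct nf_flowtable *flow_table,
                                     struct flow_offload *flow, void *data)
{
        struct net_device *dev = data;

        if (!dev) {
                flow_offload_teardown(flow);    /* flowtable going away */
                return;
        }

        if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
            (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
             flow->tuplehash[1].tuple.iifidx == dev->ifindex))
                flow_offload_teardown(flow);
}
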
691 MODULE_DESCRIPTION("Netfilter flow table module");