Lines matching references to tuple (the leading number on each match is the source file's own line number; the code is apparently the Linux kernel's netfilter flowtable IPv4/IPv6 fast path, net/netfilter/nf_flow_table_ip.c).

85 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
90 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
108 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
113 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
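
The matches at 85-113 are the IPv4 NAT rewrite sites (the SNAT and DNAT helpers): in every case the replacement address is read from the opposite direction's tuple, since that is the address the return traffic is already keyed on. A minimal userspace model of that direction-mirroring follows; the demo_* names are illustrative stand-ins, not kernel definitions.

    #include <stdio.h>
    #include <arpa/inet.h>

    enum { DIR_ORIGINAL = 0, DIR_REPLY = 1 };

    /* demo stand-in for a flow's two per-direction tuples; the kernel's
       struct flow_offload keeps one tuplehash per direction */
    struct demo_nat_flow {
        struct { struct in_addr src_v4, dst_v4; } tuple[2];
    };

    /* the pattern behind matches 85/90/108/113: the replacement address
       is always read from the OPPOSITE direction's tuple, e.g. a
       masqueraded source is whatever the reply tuple lists as its
       destination */
    static struct in_addr demo_snat_saddr(const struct demo_nat_flow *f)
    {
        return f->tuple[DIR_REPLY].dst_v4;   /* cf. match at line 85 */
    }

    int main(void)
    {
        struct demo_nat_flow f = { 0 };
        struct in_addr a;
        char buf[INET_ADDRSTRLEN];

        /* forward leg 10.0.0.2 -> 203.0.113.5 masqueraded as 192.0.2.1,
           so the reply tuple runs 203.0.113.5 -> 192.0.2.1 */
        inet_pton(AF_INET, "10.0.0.2",    &f.tuple[DIR_ORIGINAL].src_v4);
        inet_pton(AF_INET, "203.0.113.5", &f.tuple[DIR_ORIGINAL].dst_v4);
        inet_pton(AF_INET, "203.0.113.5", &f.tuple[DIR_REPLY].src_v4);
        inet_pton(AF_INET, "192.0.2.1",   &f.tuple[DIR_REPLY].dst_v4);

        a = demo_snat_saddr(&f);
        inet_ntop(AF_INET, &a, buf, sizeof(buf));
        printf("forward packets leave with saddr %s\n", buf); /* 192.0.2.1 */
        return 0;
    }
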
142 struct flow_offload_tuple *tuple)
149 tuple->encap[i].id = skb_vlan_tag_get(skb);
150 tuple->encap[i].proto = skb->vlan_proto;
156 tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
157 tuple->encap[i].proto = skb->protocol;
161 tuple->encap[i].id = ntohs(phdr->sid);
162 tuple->encap[i].proto = skb->protocol;
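
The matches at 142-162 fall in nf_flow_tuple_encap, which folds up to two levels of encapsulation into the lookup key so a VLAN-tagged or PPPoE flow cannot collide with an untagged one: a NIC-stripped VLAN tag comes from skb metadata, an in-payload 802.1Q tag from the frame itself, and PPPoE contributes its session id. A sketch of the branching; struct and field names are demo stand-ins, and the hardware-tag proto is simplified (the kernel records skb->vlan_proto, which may also be 802.1ad).

    #include <stdint.h>
    #include <arpa/inet.h>

    #define DEMO_MAX_ENCAP 2
    #define DEMO_P_8021Q   0x8100  /* ETH_P_8021Q */
    #define DEMO_P_PPP_SES 0x8864  /* ETH_P_PPP_SES */

    struct demo_encap { uint16_t id; uint16_t proto; };

    /* the bits of skb state the three branches read */
    struct demo_pkt {
        int      hw_vlan_present;    /* skb_vlan_tag_present() */
        uint16_t hw_vlan_id;         /* skb_vlan_tag_get() */
        uint16_t protocol;           /* skb->protocol (host order here) */
        uint16_t inline_vlan_tci_be; /* veth->h_vlan_TCI, network order */
        uint16_t pppoe_sid_be;       /* phdr->sid, network order */
    };

    /* model of the branching at 149-162: a NIC-stripped VLAN tag is
       recorded first, then at most one more level from an in-payload
       802.1Q or PPPoE header; caller zeroes enc[] beforehand */
    static void demo_record_encap(const struct demo_pkt *p,
                                  struct demo_encap enc[DEMO_MAX_ENCAP])
    {
        int i = 0;

        if (p->hw_vlan_present) {
            enc[i].id = p->hw_vlan_id;
            enc[i].proto = DEMO_P_8021Q;
            i++;
        }
        switch (p->protocol) {
        case DEMO_P_8021Q:
            enc[i].id = ntohs(p->inline_vlan_tci_be);
            enc[i].proto = p->protocol;
            break;
        case DEMO_P_PPP_SES:
            enc[i].id = ntohs(p->pppoe_sid_be);
            enc[i].proto = p->protocol;
            break;
        }
    }
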
174 struct flow_offload_tuple *tuple)
220 tuple->src_port = ports->source;
221 tuple->dst_port = ports->dest;
235 tuple->src_v4.s_addr = iph->saddr;
236 tuple->dst_v4.s_addr = iph->daddr;
237 tuple->l3proto = AF_INET;
238 tuple->l4proto = ipproto;
239 tuple->iifidx = ctx->in->ifindex;
240 nf_flow_tuple_encap(skb, tuple);
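
The matches at 174-240 are nf_flow_tuple_ip building the lookup key from the packet itself: L4 ports, IPv4 addresses, layer-3/4 protocols, the ingress ifindex, and finally the encapsulation tags via nf_flow_tuple_encap. Roughly, as a plain struct fill; the field layout is illustrative, since the real struct flow_offload_tuple carries more state (dst_cache, xmit_type, cached egress info, ...).

    #include <stdint.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    struct demo_tuple {
        struct in_addr src_v4, dst_v4;
        uint16_t src_port, dst_port;  /* network byte order, as at 220-221 */
        uint8_t  l3proto, l4proto;
        int      iifidx;
    };

    /* fields a parser would hand over from the IPv4 + TCP/UDP headers */
    struct demo_parsed {
        struct in_addr saddr, daddr;
        uint16_t sport_be, dport_be;
        uint8_t  ipproto;
        int      in_ifindex;
    };

    /* mirrors the assignments at 220-240 */
    static void demo_fill_tuple(const struct demo_parsed *p,
                                struct demo_tuple *t)
    {
        t->src_port = p->sport_be;    /* ports->source */
        t->dst_port = p->dport_be;    /* ports->dest */
        t->src_v4   = p->saddr;       /* iph->saddr */
        t->dst_v4   = p->daddr;       /* iph->daddr */
        t->l3proto  = AF_INET;
        t->l4proto  = p->ipproto;     /* IPPROTO_TCP / IPPROTO_UDP */
        t->iifidx   = p->in_ifindex;  /* ctx->in->ifindex */
        /* the kernel then calls nf_flow_tuple_encap(skb, tuple), line 240 */
    }
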
257 static inline bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
259 if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
260 tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
263 return dst_check(tuple->dst_cache, tuple->dst_cookie);
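
nf_flow_dst_check (257-263) revalidates the cached route only for the xmit types that actually hold one; direct xmit caches no dst, so the check trivially passes. The dst_cookie is the generation cookie dst_check() uses to notice that routing has changed underneath the flow. The shape of that guard, modeled in plain C with demo names:

    enum demo_xmit { DEMO_XMIT_NEIGH, DEMO_XMIT_XFRM, DEMO_XMIT_DIRECT };

    struct demo_route_tuple {
        enum demo_xmit xmit_type;
        const void    *dst_cache;  /* struct dst_entry * in the kernel */
        unsigned int   dst_cookie; /* generation cookie for dst_check() */
    };

    /* stand-in for dst_check(): the real call asks the dst's ->check()
       op whether the cached route is still valid for this cookie */
    static int demo_dst_still_valid(const void *dst, unsigned int cookie)
    {
        (void)cookie;
        return dst != 0;
    }

    static int demo_dst_check(const struct demo_route_tuple *t)
    {
        /* direct xmit caches no route, so nothing can go stale */
        if (t->xmit_type != DEMO_XMIT_NEIGH &&
            t->xmit_type != DEMO_XMIT_XFRM)
            return 1;
        return demo_dst_still_valid(t->dst_cache, t->dst_cookie);
    }
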
308 for (i = 0; i < tuplehash->tuple.encap_num; i++) {
335 outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
340 dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
341 tuplehash->tuple.out.h_source, skb->len);
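
The matches at 308-341 are the direct-xmit egress path: pop the recorded encap levels (308), resolve the egress device from the cached ifindex (335), and stamp the Ethernet header from the MAC addresses captured at offload time (340-341), with no neighbour lookup on the hot path. That last step, modeled with demo types rather than the kernel API:

    #include <string.h>
    #include <stdint.h>

    /* demo mirror of the cached egress info (tuple.out.* at 335-341) */
    struct demo_out {
        int     ifidx;        /* tuple->out.ifidx, resolved at line 335 */
        uint8_t h_dest[6];    /* tuple->out.h_dest */
        uint8_t h_source[6];  /* tuple->out.h_source */
    };

    /* both MACs were captured when the flow was offloaded, so the
       header is written straight into the frame, which is what the
       dev_hard_header() call at 340 accomplishes */
    static void demo_build_eth(uint8_t frame[14], uint16_t type_be,
                               const struct demo_out *out)
    {
        memcpy(frame,      out->h_dest,   6);
        memcpy(frame + 6,  out->h_source, 6);
        memcpy(frame + 12, &type_be,      2); /* EtherType, network order */
    }
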
351 struct flow_offload_tuple tuple = {};
357 if (nf_flow_tuple_ip(ctx, skb, &tuple) < 0)
360 return flow_offload_lookup(flow_table, &tuple);
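
The matches at 351-360 show the lookup idiom: a zeroed struct flow_offload_tuple on the stack, filled from the packet by nf_flow_tuple_ip, then handed to flow_offload_lookup; a NULL result simply sends the packet down the regular forwarding path.
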
373 dir = tuplehash->tuple.dir;
376 mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset;
385 if (!nf_flow_dst_check(&tuplehash->tuple)) {
405 nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
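
In the hook body (373-405) everything is indexed by the direction recorded in the matching tuplehash: the per-direction MTU gates the fast path, a failed nf_flow_dst_check sends the flow back to the slow path for re-resolution, and conntrack accounting is bumped per direction. A small model of that direction-indexed bookkeeping; the real MTU test, nf_flow_exceeds_mtu(), also honors DF semantics.

    #include <stdint.h>

    enum { DIR_ORIGINAL = 0, DIR_REPLY = 1, DIR_MAX = 2 };

    struct demo_acct_flow {
        struct { unsigned int mtu; } tuple[DIR_MAX]; /* per-direction MTU */
        uint64_t bytes[DIR_MAX], pkts[DIR_MAX];      /* conntrack counters */
    };

    /* mirrors the checks at 376 and 405: both the MTU gate and the
       accounting use the direction the packet matched on */
    static int demo_fastpath_forward(struct demo_acct_flow *f, int dir,
                                     unsigned int pkt_len,
                                     unsigned int offset)
    {
        unsigned int mtu = f->tuple[dir].mtu + offset;

        if (pkt_len > mtu)
            return -1;             /* oversized: leave it to the slow path */
        f->bytes[dir] += pkt_len;  /* nf_ct_acct_update(flow->ct, dir, len) */
        f->pkts[dir]++;
        return 0;
    }
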
436 if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
437 rt = (struct rtable *)tuplehash->tuple.dst_cache;
444 dir = tuplehash->tuple.dir;
447 switch (tuplehash->tuple.xmit_type) {
449 rt = (struct rtable *)tuplehash->tuple.dst_cache;
452 nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
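
The matches at 436-452 pick the transmit strategy cached in the tuple: XFRM output short-circuits early, otherwise the neigh path re-derives the nexthop. Line 452 is worth a second look: the nexthop for direction dir is resolved against the other direction's source address, which is exactly the far end of this leg. Modeled with demo names:

    #include <netinet/in.h>

    enum { DIR_ORIGINAL = 0, DIR_REPLY = 1 };

    struct demo_hop_flow {
        struct { struct in_addr src_v4; } tuple[2];
    };

    /* match 452: transmitting in direction dir, the packet is headed to
       the address the opposite direction's traffic comes FROM, so !dir's
       source is the daddr handed to rt_nexthop(); a route gateway, if
       present, still takes precedence inside rt_nexthop() itself */
    static struct in_addr demo_nexthop_daddr(const struct demo_hop_flow *f,
                                             int dir)
    {
        return f->tuple[!dir].src_v4;
    }
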
523 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
528 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
546 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
551 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
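
The matches at 523-551 are the IPv6 counterparts of the NAT rewrites at 85-113: the same opposite-direction selection, except whole struct in6_addr values are assigned (there is no scalar member like .s_addr for v6).
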
577 struct flow_offload_tuple *tuple)
617 tuple->src_port = ports->source;
618 tuple->dst_port = ports->dest;
632 tuple->src_v6 = ip6h->saddr;
633 tuple->dst_v6 = ip6h->daddr;
634 tuple->l3proto = AF_INET6;
635 tuple->l4proto = nexthdr;
636 tuple->iifidx = ctx->in->ifindex;
637 nf_flow_tuple_encap(skb, tuple);
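
The matches at 577-637 show nf_flow_tuple_ipv6 mirroring nf_flow_tuple_ip field for field, setting l3proto to AF_INET6 and l4proto from the parsed nexthdr, and ending with the same nf_flow_tuple_encap call, so VLAN/PPPoE keying behaves identically for both address families.
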
652 dir = tuplehash->tuple.dir;
655 mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset;
664 if (!nf_flow_dst_check(&tuplehash->tuple)) {
683 nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
693 struct flow_offload_tuple tuple = {};
699 if (nf_flow_tuple_ipv6(ctx, skb, &tuple) < 0)
702 return flow_offload_lookup(flow_table, &tuple);
731 if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
732 rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
739 dir = tuplehash->tuple.dir;
742 switch (tuplehash->tuple.xmit_type) {
744 rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
747 nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
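
The matches at 731-747 close the symmetry on the IPv6 side: the same XFRM short-circuit and the same !dir nexthop selection, except rt6_nexthop() takes the 128-bit address by pointer (&...src_v6) where the IPv4 path passed s_addr by value.
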