1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 *  SR-IPv6 implementation
4 *
5 *  Authors:
6 *  David Lebrun <david.lebrun@uclouvain.be>
7 *  eBPF support: Mathieu Xhonneux <m.xhonneux@gmail.com>
8 */
9
10#include <linux/filter.h>
11#include <linux/types.h>
12#include <linux/skbuff.h>
13#include <linux/net.h>
14#include <linux/module.h>
15#include <net/ip.h>
16#include <net/lwtunnel.h>
17#include <net/netevent.h>
18#include <net/netns/generic.h>
19#include <net/ip6_fib.h>
20#include <net/route.h>
21#include <net/seg6.h>
22#include <linux/seg6.h>
23#include <linux/seg6_local.h>
24#include <net/addrconf.h>
25#include <net/ip6_route.h>
26#include <net/dst_cache.h>
27#include <net/ip_tunnels.h>
28#ifdef CONFIG_IPV6_SEG6_HMAC
29#include <net/seg6_hmac.h>
30#endif
31#include <net/seg6_local.h>
32#include <linux/etherdevice.h>
33#include <linux/bpf.h>
34#include <linux/netfilter.h>
35
36#define SEG6_F_ATTR(i)		BIT(i)
37
38struct seg6_local_lwt;
39
40/* callbacks used for customizing the creation and destruction of a behavior */
41struct seg6_local_lwtunnel_ops {
42	int (*build_state)(struct seg6_local_lwt *slwt, const void *cfg,
43			   struct netlink_ext_ack *extack);
44	void (*destroy_state)(struct seg6_local_lwt *slwt);
45};
46
47struct seg6_action_desc {
48	int action;
49	unsigned long attrs;
50
	/* The optattrs field specifies all the optional attributes supported
	 * by a specific behavior.
	 * If one of these attributes is not provided in the netlink message
	 * during behavior creation, no error is returned to userspace.
	 *
	 * Each attribute can be of only one of two (mutually exclusive)
	 * types: 1) required or 2) optional.
	 * Every user MUST obey this rule! If you set an attribute as
	 * required, the same attribute CANNOT be set as optional, and vice
	 * versa.
	 */
63	unsigned long optattrs;
64
65	int (*input)(struct sk_buff *skb, struct seg6_local_lwt *slwt);
66	int static_headroom;
67
68	struct seg6_local_lwtunnel_ops slwt_ops;
69};
70
71struct bpf_lwt_prog {
72	struct bpf_prog *prog;
73	char *name;
74};
75
76/* default length values (expressed in bits) for both Locator-Block and
77 * Locator-Node Function.
78 *
79 * Both SEG6_LOCAL_LCBLOCK_DBITS and SEG6_LOCAL_LCNODE_FN_DBITS *must* be:
80 *    i) greater than 0;
 *   ii) evenly divisible by 8. In other words, the lengths of the
82 *	 Locator-Block and Locator-Node Function must be byte-aligned (we can
83 *	 relax this constraint in the future if really needed).
84 *
85 * Moreover, a third condition must hold:
86 *  iii) SEG6_LOCAL_LCBLOCK_DBITS + SEG6_LOCAL_LCNODE_FN_DBITS <= 128.
87 *
 * The correctness of the SEG6_LOCAL_LCBLOCK_DBITS and
 * SEG6_LOCAL_LCNODE_FN_DBITS values is checked at kernel compilation time.
 * If the compilation stops, check the values of these parameters to see if
 * they meet conditions (i), (ii) and (iii).
92 */
93#define SEG6_LOCAL_LCBLOCK_DBITS	32
94#define SEG6_LOCAL_LCNODE_FN_DBITS	16
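
/* With the default values above, an SRv6 SID handled by the NEXT-C-SID
 * flavor is laid out as follows (for illustration only):
 *
 *   | Locator-Block (32 bits) | Locator-Node Function (16 bits) | Argument |
 *
 * i.e. 4 + 2 octets at the beginning of the 128-bit address, with the
 * remaining 10 octets forming the Argument, which carries the upcoming
 * C-SIDs to be processed.
 */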
95
96/* The following next_csid_chk_{cntr,lcblock,lcblock_fn}_bits macros can be
97 * used directly to check whether the lengths (in bits) of Locator-Block and
98 * Locator-Node Function are valid according to (i), (ii), (iii).
99 */
100#define next_csid_chk_cntr_bits(blen, flen)		\
101	((blen) + (flen) > 128)
102
103#define next_csid_chk_lcblock_bits(blen)		\
104({							\
105	typeof(blen) __tmp = blen;			\
106	(!__tmp || __tmp > 120 || (__tmp & 0x07));	\
107})
108
109#define next_csid_chk_lcnode_fn_bits(flen)		\
110	next_csid_chk_lcblock_bits(flen)
111
112/* flag indicating that flavors are set up for a given End* behavior */
113#define SEG6_F_LOCAL_FLAVORS		SEG6_F_ATTR(SEG6_LOCAL_FLAVORS)
114
115#define SEG6_F_LOCAL_FLV_OP(flvname)	BIT(SEG6_LOCAL_FLV_OP_##flvname)
116#define SEG6_F_LOCAL_FLV_NEXT_CSID	SEG6_F_LOCAL_FLV_OP(NEXT_CSID)
117#define SEG6_F_LOCAL_FLV_PSP		SEG6_F_LOCAL_FLV_OP(PSP)
118
119/* Supported RFC8986 Flavor operations are reported in this bitmask */
120#define SEG6_LOCAL_FLV8986_SUPP_OPS	SEG6_F_LOCAL_FLV_PSP
121
122#define SEG6_LOCAL_END_FLV_SUPP_OPS	(SEG6_F_LOCAL_FLV_NEXT_CSID | \
123					 SEG6_LOCAL_FLV8986_SUPP_OPS)
124#define SEG6_LOCAL_END_X_FLV_SUPP_OPS	SEG6_F_LOCAL_FLV_NEXT_CSID
125
126struct seg6_flavors_info {
127	/* Flavor operations */
128	__u32 flv_ops;
129
130	/* Locator-Block length, expressed in bits */
131	__u8 lcblock_bits;
	/* Locator-Node Function length, expressed in bits */
133	__u8 lcnode_func_bits;
134};
135
136enum seg6_end_dt_mode {
137	DT_INVALID_MODE	= -EINVAL,
138	DT_LEGACY_MODE	= 0,
139	DT_VRF_MODE	= 1,
140};
141
142struct seg6_end_dt_info {
143	enum seg6_end_dt_mode mode;
144
145	struct net *net;
	/* VRF device associated with the routing table used by the SRv6
147	 * End.DT4/DT6 behavior for routing IPv4/IPv6 packets.
148	 */
149	int vrf_ifindex;
150	int vrf_table;
151
152	/* tunneled packet family (IPv4 or IPv6).
153	 * Protocol and header length are inferred from family.
154	 */
155	u16 family;
156};
157
158struct pcpu_seg6_local_counters {
159	u64_stats_t packets;
160	u64_stats_t bytes;
161	u64_stats_t errors;
162
163	struct u64_stats_sync syncp;
164};
165
166/* This struct groups all the SRv6 Behavior counters supported so far.
167 *
168 * put_nla_counters() makes use of this data structure to collect all counter
169 * values after the per-CPU counter evaluation has been performed.
170 * Finally, each counter value (in seg6_local_counters) is stored in the
171 * corresponding netlink attribute and sent to user space.
172 *
173 * NB: we don't want to expose this structure to user space!
174 */
175struct seg6_local_counters {
176	__u64 packets;
177	__u64 bytes;
178	__u64 errors;
179};
180
181#define seg6_local_alloc_pcpu_counters(__gfp)				\
182	__netdev_alloc_pcpu_stats(struct pcpu_seg6_local_counters,	\
183				  ((__gfp) | __GFP_ZERO))
184
185#define SEG6_F_LOCAL_COUNTERS	SEG6_F_ATTR(SEG6_LOCAL_COUNTERS)
186
187struct seg6_local_lwt {
188	int action;
189	struct ipv6_sr_hdr *srh;
190	int table;
191	struct in_addr nh4;
192	struct in6_addr nh6;
193	int iif;
194	int oif;
195	struct bpf_lwt_prog bpf;
196#ifdef CONFIG_NET_L3_MASTER_DEV
197	struct seg6_end_dt_info dt_info;
198#endif
199	struct seg6_flavors_info flv_info;
200
201	struct pcpu_seg6_local_counters __percpu *pcpu_counters;
202
203	int headroom;
204	struct seg6_action_desc *desc;
	/* unlike the required attrs, we have to track which optional
	 * attributes have actually been parsed.
	 */
208	unsigned long parsed_optattrs;
209};
210
211static struct seg6_local_lwt *seg6_local_lwtunnel(struct lwtunnel_state *lwt)
212{
213	return (struct seg6_local_lwt *)lwt->data;
214}
215
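/* Fetch the SRH that has to be processed by the behavior and, when
 * CONFIG_IPV6_SEG6_HMAC is enabled, verify its HMAC TLV. Returns NULL if the
 * SRH is missing/unusable or if the HMAC check fails.
 */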
216static struct ipv6_sr_hdr *get_and_validate_srh(struct sk_buff *skb)
217{
218	struct ipv6_sr_hdr *srh;
219
220	srh = seg6_get_srh(skb, IP6_FH_F_SKIP_RH);
221	if (!srh)
222		return NULL;
223
224#ifdef CONFIG_IPV6_SEG6_HMAC
225	if (!seg6_hmac_validate_skb(skb))
226		return NULL;
227#endif
228
229	return srh;
230}
231
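/* Strip the outer IPv6 header (along with its extension headers, including
 * the SRH, if any) so that the inner packet of type @proto becomes the new
 * network header. The SRH, when present, must have segments_left == 0 and,
 * with CONFIG_IPV6_SEG6_HMAC, a valid HMAC TLV.
 */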
232static bool decap_and_validate(struct sk_buff *skb, int proto)
233{
234	struct ipv6_sr_hdr *srh;
235	unsigned int off = 0;
236
237	srh = seg6_get_srh(skb, 0);
238	if (srh && srh->segments_left > 0)
239		return false;
240
241#ifdef CONFIG_IPV6_SEG6_HMAC
242	if (srh && !seg6_hmac_validate_skb(skb))
243		return false;
244#endif
245
246	if (ipv6_find_hdr(skb, &off, proto, NULL, NULL) < 0)
247		return false;
248
249	if (!pskb_pull(skb, off))
250		return false;
251
252	skb_postpull_rcsum(skb, skb_network_header(skb), off);
253
254	skb_reset_network_header(skb);
255	skb_reset_transport_header(skb);
256	if (iptunnel_pull_offloads(skb))
257		return false;
258
259	return true;
260}
261
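/* Consume one segment: decrement Segments Left and copy the now-active
 * segment from the SID list into the outer IPv6 destination address.
 */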
262static void advance_nextseg(struct ipv6_sr_hdr *srh, struct in6_addr *daddr)
263{
264	struct in6_addr *addr;
265
266	srh->segments_left--;
267	addr = srh->segments + srh->segments_left;
268	*daddr = *addr;
269}
270
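/* Perform a route lookup for the packet, either in the main input path
 * (tbl_id == 0) or in the given routing table. When @nhaddr is set, it is
 * used as destination instead of the packet's DA. If @local_delivery is
 * false, routes pointing to a loopback (local delivery) device are discarded
 * and the packet is steered to the blackhole entry instead.
 */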
271static int
272seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
273			u32 tbl_id, bool local_delivery)
274{
275	struct net *net = dev_net(skb->dev);
276	struct ipv6hdr *hdr = ipv6_hdr(skb);
277	int flags = RT6_LOOKUP_F_HAS_SADDR;
278	struct dst_entry *dst = NULL;
279	struct rt6_info *rt;
280	struct flowi6 fl6;
281	int dev_flags = 0;
282
283	memset(&fl6, 0, sizeof(fl6));
284	fl6.flowi6_iif = skb->dev->ifindex;
285	fl6.daddr = nhaddr ? *nhaddr : hdr->daddr;
286	fl6.saddr = hdr->saddr;
287	fl6.flowlabel = ip6_flowinfo(hdr);
288	fl6.flowi6_mark = skb->mark;
289	fl6.flowi6_proto = hdr->nexthdr;
290
291	if (nhaddr)
292		fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
293
294	if (!tbl_id) {
295		dst = ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags);
296	} else {
297		struct fib6_table *table;
298
299		table = fib6_get_table(net, tbl_id);
300		if (!table)
301			goto out;
302
303		rt = ip6_pol_route(net, table, 0, &fl6, skb, flags);
304		dst = &rt->dst;
305	}
306
307	/* we want to discard traffic destined for local packet processing,
308	 * if @local_delivery is set to false.
309	 */
310	if (!local_delivery)
311		dev_flags |= IFF_LOOPBACK;
312
313	if (dst && (dst->dev->flags & dev_flags) && !dst->error) {
314		dst_release(dst);
315		dst = NULL;
316	}
317
318out:
319	if (!dst) {
320		rt = net->ipv6.ip6_blk_hole_entry;
321		dst = &rt->dst;
322		dst_hold(dst);
323	}
324
325	skb_dst_drop(skb);
326	skb_dst_set(skb, dst);
327	return dst->error;
328}
329
330int seg6_lookup_nexthop(struct sk_buff *skb,
331			struct in6_addr *nhaddr, u32 tbl_id)
332{
333	return seg6_lookup_any_nexthop(skb, nhaddr, tbl_id, false);
334}
335
336static __u8 seg6_flv_lcblock_octects(const struct seg6_flavors_info *finfo)
337{
338	return finfo->lcblock_bits >> 3;
339}
340
341static __u8 seg6_flv_lcnode_func_octects(const struct seg6_flavors_info *finfo)
342{
343	return finfo->lcnode_func_bits >> 3;
344}
345
346static bool seg6_next_csid_is_arg_zero(const struct in6_addr *addr,
347				       const struct seg6_flavors_info *finfo)
348{
349	__u8 fnc_octects = seg6_flv_lcnode_func_octects(finfo);
350	__u8 blk_octects = seg6_flv_lcblock_octects(finfo);
351	__u8 arg_octects;
352	int i;
353
354	arg_octects = 16 - blk_octects - fnc_octects;
355	for (i = 0; i < arg_octects; ++i) {
356		if (addr->s6_addr[blk_octects + fnc_octects + i] != 0x00)
357			return false;
358	}
359
360	return true;
361}
362
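/* Example (for illustration only, with the default 4-octet Locator-Block and
 * 2-octet Locator-Node Function): advancing the DA shifts the whole Argument
 * left by two octets, so that its first two octets land in the Locator-Node
 * Function position and the last two octets of the address are zero-filled.
 * In this way, the next C-SID becomes the active one.
 */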
363/* assume that DA.Argument length > 0 */
364static void seg6_next_csid_advance_arg(struct in6_addr *addr,
365				       const struct seg6_flavors_info *finfo)
366{
367	__u8 fnc_octects = seg6_flv_lcnode_func_octects(finfo);
368	__u8 blk_octects = seg6_flv_lcblock_octects(finfo);
369
370	/* advance DA.Argument */
371	memmove(&addr->s6_addr[blk_octects],
372		&addr->s6_addr[blk_octects + fnc_octects],
373		16 - blk_octects - fnc_octects);
374
375	memset(&addr->s6_addr[16 - fnc_octects], 0x00, fnc_octects);
376}
377
378static int input_action_end_finish(struct sk_buff *skb,
379				   struct seg6_local_lwt *slwt)
380{
381	seg6_lookup_nexthop(skb, NULL, 0);
382
383	return dst_input(skb);
384}
385
386static int input_action_end_core(struct sk_buff *skb,
387				 struct seg6_local_lwt *slwt)
388{
389	struct ipv6_sr_hdr *srh;
390
391	srh = get_and_validate_srh(skb);
392	if (!srh)
393		goto drop;
394
395	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
396
397	return input_action_end_finish(skb, slwt);
398
399drop:
400	kfree_skb(skb);
401	return -EINVAL;
402}
403
404static int end_next_csid_core(struct sk_buff *skb, struct seg6_local_lwt *slwt)
405{
406	const struct seg6_flavors_info *finfo = &slwt->flv_info;
407	struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
408
409	if (seg6_next_csid_is_arg_zero(daddr, finfo))
410		return input_action_end_core(skb, slwt);
411
412	/* update DA */
413	seg6_next_csid_advance_arg(daddr, finfo);
414
415	return input_action_end_finish(skb, slwt);
416}
417
418static int input_action_end_x_finish(struct sk_buff *skb,
419				     struct seg6_local_lwt *slwt)
420{
421	seg6_lookup_nexthop(skb, &slwt->nh6, 0);
422
423	return dst_input(skb);
424}
425
426static int input_action_end_x_core(struct sk_buff *skb,
427				   struct seg6_local_lwt *slwt)
428{
429	struct ipv6_sr_hdr *srh;
430
431	srh = get_and_validate_srh(skb);
432	if (!srh)
433		goto drop;
434
435	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
436
437	return input_action_end_x_finish(skb, slwt);
438
439drop:
440	kfree_skb(skb);
441	return -EINVAL;
442}
443
444static int end_x_next_csid_core(struct sk_buff *skb,
445				struct seg6_local_lwt *slwt)
446{
447	const struct seg6_flavors_info *finfo = &slwt->flv_info;
448	struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
449
450	if (seg6_next_csid_is_arg_zero(daddr, finfo))
451		return input_action_end_x_core(skb, slwt);
452
453	/* update DA */
454	seg6_next_csid_advance_arg(daddr, finfo);
455
456	return input_action_end_x_finish(skb, slwt);
457}
458
459static bool seg6_next_csid_enabled(__u32 fops)
460{
461	return fops & SEG6_F_LOCAL_FLV_NEXT_CSID;
462}
463
464/* Processing of SRv6 End, End.X, and End.T behaviors can be extended through
465 * the flavors framework. These behaviors must report the subset of (flavor)
466 * operations they currently implement. In this way, if a user specifies a
467 * flavor combination that is not supported by a given End* behavior, the
468 * kernel refuses to instantiate the tunnel reporting the error.
469 */
470static int seg6_flv_supp_ops_by_action(int action, __u32 *fops)
471{
472	switch (action) {
473	case SEG6_LOCAL_ACTION_END:
474		*fops = SEG6_LOCAL_END_FLV_SUPP_OPS;
475		break;
476	case SEG6_LOCAL_ACTION_END_X:
477		*fops = SEG6_LOCAL_END_X_FLV_SUPP_OPS;
478		break;
479	default:
480		return -EOPNOTSUPP;
481	}
482
483	return 0;
484}
485
486/* We describe the packet state in relation to the absence/presence of the SRH
487 * and the Segment Left (SL) field.
488 * For our purposes, it is not necessary to record the exact value of the SL
489 * when the SID List consists of two or more segments.
490 */
491enum seg6_local_pktinfo {
492	/* the order really matters! */
493	SEG6_LOCAL_PKTINFO_NOHDR	= 0,
494	SEG6_LOCAL_PKTINFO_SL_ZERO,
495	SEG6_LOCAL_PKTINFO_SL_ONE,
496	SEG6_LOCAL_PKTINFO_SL_MORE,
497	__SEG6_LOCAL_PKTINFO_MAX,
498};
499
500#define SEG6_LOCAL_PKTINFO_MAX (__SEG6_LOCAL_PKTINFO_MAX - 1)
501
502static enum seg6_local_pktinfo seg6_get_srh_pktinfo(struct ipv6_sr_hdr *srh)
503{
504	__u8 sgl;
505
506	if (!srh)
507		return SEG6_LOCAL_PKTINFO_NOHDR;
508
509	sgl = srh->segments_left;
510	if (sgl < 2)
511		return SEG6_LOCAL_PKTINFO_SL_ZERO + sgl;
512
513	return SEG6_LOCAL_PKTINFO_SL_MORE;
514}
515
516enum seg6_local_flv_action {
517	SEG6_LOCAL_FLV_ACT_UNSPEC	= 0,
518	SEG6_LOCAL_FLV_ACT_END,
519	SEG6_LOCAL_FLV_ACT_PSP,
520	SEG6_LOCAL_FLV_ACT_USP,
521	SEG6_LOCAL_FLV_ACT_USD,
522	__SEG6_LOCAL_FLV_ACT_MAX
523};
524
525#define SEG6_LOCAL_FLV_ACT_MAX (__SEG6_LOCAL_FLV_ACT_MAX - 1)
526
527/* The action table for RFC8986 flavors (see the flv8986_act_tbl below)
528 * contains the actions (i.e. processing operations) to be applied on packets
529 * when flavors are configured for an End* behavior.
 * By combining the pktinfo data and the flavors mask, the macro
531 * computes the index used to access the elements (actions) stored in the
532 * action table. The index is structured as follows:
533 *
534 *                     index
535 *       _______________/\________________
536 *      /                                 \
537 *      +----------------+----------------+
538 *      |        pf      |      afm       |
539 *      +----------------+----------------+
540 *        ph-1 ... p1 p0   fk-1 ... f1 f0
541 *     MSB                               LSB
542 *
543 * where:
544 *  - 'afm' (adjusted flavor mask) is the mask containing a combination of the
545 *     RFC8986 flavors currently supported. 'afm' corresponds to the @fm
 *     argument of the macro whose value is right-shifted by 1 bit. By doing so,
547 *     we discard the SEG6_LOCAL_FLV_OP_UNSPEC flag (bit 0 in @fm) which is
548 *     never used here;
549 *  - 'pf' encodes the packet info (pktinfo) regarding the presence/absence of
550 *    the SRH, SL = 0, etc. 'pf' is set with the value of @pf provided as
551 *    argument to the macro.
552 */
553#define flv8986_act_tbl_idx(pf, fm)					\
554	((((pf) << bits_per(SEG6_LOCAL_FLV8986_SUPP_OPS)) |		\
555	  ((fm) & SEG6_LOCAL_FLV8986_SUPP_OPS)) >> SEG6_LOCAL_FLV_OP_PSP)
556
557/* We compute the size of the action table by considering the RFC8986 flavors
558 * actually supported by the kernel. In this way, the size is automatically
559 * adjusted when new flavors are supported.
560 */
561#define FLV8986_ACT_TBL_SIZE						\
562	roundup_pow_of_two(flv8986_act_tbl_idx(SEG6_LOCAL_PKTINFO_MAX,	\
563					       SEG6_LOCAL_FLV8986_SUPP_OPS))
564
565/* tbl_cfg(act, pf, fm) macro is used to easily configure the action
566 * table; it accepts 3 arguments:
567 *     i) @act, the suffix from SEG6_LOCAL_FLV_ACT_{act} representing
 *        the action that should be applied to the packet;
569 *    ii) @pf, the suffix from SEG6_LOCAL_PKTINFO_{pf} reporting the packet
570 *        info about the lack/presence of SRH, SRH with SL = 0, etc;
571 *   iii) @fm, the mask of flavors.
572 */
573#define tbl_cfg(act, pf, fm)						\
574	[flv8986_act_tbl_idx(SEG6_LOCAL_PKTINFO_##pf,			\
575			     (fm))] = SEG6_LOCAL_FLV_ACT_##act
576
577/* shorthand for improving readability */
578#define F_PSP	SEG6_F_LOCAL_FLV_PSP
579
580/* The table contains, for each combination of the pktinfo data and
581 * flavors, the action that should be taken on a packet (e.g.
582 * "standard" Endpoint processing, Penultimate Segment Pop, etc).
583 *
584 * By default, table entries not explicitly configured are initialized with the
585 * SEG6_LOCAL_FLV_ACT_UNSPEC action, which generally has the effect of
586 * discarding the processed packet.
587 */
588static const u8 flv8986_act_tbl[FLV8986_ACT_TBL_SIZE] = {
	/* PSP variant for packets whose SRH has SL = 1 */
	tbl_cfg(PSP, SL_ONE, F_PSP),
	/* standard End processing for packets whose SRH has SL > 1 */
	tbl_cfg(END, SL_MORE, F_PSP),
593};
594
595#undef F_PSP
596#undef tbl_cfg
597
598/* For each flavor defined in RFC8986 (or a combination of them) an action is
599 * performed on the packet. The specific action depends on:
600 *  - info extracted from the packet (i.e. pktinfo data) regarding the
601 *    lack/presence of the SRH, and if the SRH is available, on the value of
602 *    Segment Left field;
603 *  - the mask of flavors configured for the specific SRv6 End* behavior.
604 *
 * The function combines both the pktinfo and the flavors mask to evaluate the
606 * corresponding action to be taken on the packet.
607 */
608static enum seg6_local_flv_action
609seg6_local_flv8986_act_lookup(enum seg6_local_pktinfo pinfo, __u32 flvmask)
610{
611	unsigned long index;
612
613	/* check if the provided mask of flavors is supported */
614	if (unlikely(flvmask & ~SEG6_LOCAL_FLV8986_SUPP_OPS))
615		return SEG6_LOCAL_FLV_ACT_UNSPEC;
616
617	index = flv8986_act_tbl_idx(pinfo, flvmask);
618	if (unlikely(index >= FLV8986_ACT_TBL_SIZE))
619		return SEG6_LOCAL_FLV_ACT_UNSPEC;
620
621	return flv8986_act_tbl[index];
622}
623
624/* skb->data must be aligned with skb->network_header */
625static bool seg6_pop_srh(struct sk_buff *skb, int srhoff)
626{
627	struct ipv6_sr_hdr *srh;
628	struct ipv6hdr *iph;
629	__u8 srh_nexthdr;
630	int thoff = -1;
631	int srhlen;
632	int nhlen;
633
634	if (unlikely(srhoff < sizeof(*iph) ||
635		     !pskb_may_pull(skb, srhoff + sizeof(*srh))))
636		return false;
637
638	srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
639	srhlen = ipv6_optlen(srh);
640
	/* we are about to mangle the packet; check that we can write to it */
642	if (unlikely(skb_ensure_writable(skb, srhoff + srhlen)))
643		return false;
644
645	/* skb_ensure_writable() may change skb pointers; evaluate srh again */
646	srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
647	srh_nexthdr = srh->nexthdr;
648
649	if (unlikely(!skb_transport_header_was_set(skb)))
650		goto pull;
651
652	nhlen = skb_network_header_len(skb);
653	/* we have to deal with the transport header: it could be set before
654	 * the SRH, after the SRH, or within it (which is considered wrong,
655	 * however).
656	 */
657	if (likely(nhlen <= srhoff))
658		thoff = nhlen;
659	else if (nhlen >= srhoff + srhlen)
660		/* transport_header is set after the SRH */
661		thoff = nhlen - srhlen;
662	else
663		/* transport_header falls inside the SRH; hence, we can't
664		 * restore the transport_header pointer properly after
665		 * SRH removing operation.
666		 */
667		return false;
668pull:
669	/* we need to pop the SRH:
670	 *  1) first of all, we pull out everything from IPv6 header up to SRH
671	 *     (included) evaluating also the rcsum;
672	 *  2) we overwrite (and then remove) the SRH by properly moving the
673	 *     IPv6 along with any extension header that precedes the SRH;
674	 *  3) At the end, we push back the pulled headers (except for SRH,
675	 *     obviously).
676	 */
677	skb_pull_rcsum(skb, srhoff + srhlen);
678	memmove(skb_network_header(skb) + srhlen, skb_network_header(skb),
679		srhoff);
680	skb_push(skb, srhoff);
681
682	skb_reset_network_header(skb);
683	skb_mac_header_rebuild(skb);
684	if (likely(thoff >= 0))
685		skb_set_transport_header(skb, thoff);
686
687	iph = ipv6_hdr(skb);
688	if (iph->nexthdr == NEXTHDR_ROUTING) {
689		iph->nexthdr = srh_nexthdr;
690	} else {
691		/* we must look for the extension header (EXTH, for short) that
692		 * immediately precedes the SRH we have just removed.
693		 * Then, we update the value of the EXTH nexthdr with the one
694		 * contained in the SRH nexthdr.
695		 */
696		unsigned int off = sizeof(*iph);
697		struct ipv6_opt_hdr *hp, _hdr;
698		__u8 nexthdr = iph->nexthdr;
699
700		for (;;) {
701			if (unlikely(!ipv6_ext_hdr(nexthdr) ||
702				     nexthdr == NEXTHDR_NONE))
703				return false;
704
705			hp = skb_header_pointer(skb, off, sizeof(_hdr), &_hdr);
706			if (unlikely(!hp))
707				return false;
708
709			if (hp->nexthdr == NEXTHDR_ROUTING) {
710				hp->nexthdr = srh_nexthdr;
711				break;
712			}
713
714			switch (nexthdr) {
715			case NEXTHDR_FRAGMENT:
716				fallthrough;
717			case NEXTHDR_AUTH:
718				/* we expect SRH before FRAG and AUTH */
719				return false;
720			default:
721				off += ipv6_optlen(hp);
722				break;
723			}
724
725			nexthdr = hp->nexthdr;
726		}
727	}
728
729	iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
730
731	skb_postpush_rcsum(skb, iph, srhoff);
732
733	return true;
734}
735
736/* process the packet on the basis of the RFC8986 flavors set for the given
737 * SRv6 End behavior instance.
738 */
739static int end_flv8986_core(struct sk_buff *skb, struct seg6_local_lwt *slwt)
740{
741	const struct seg6_flavors_info *finfo = &slwt->flv_info;
742	enum seg6_local_flv_action action;
743	enum seg6_local_pktinfo pinfo;
744	struct ipv6_sr_hdr *srh;
745	__u32 flvmask;
746	int srhoff;
747
748	srh = seg6_get_srh(skb, 0);
749	srhoff = srh ? ((unsigned char *)srh - skb->data) : 0;
750	pinfo = seg6_get_srh_pktinfo(srh);
751#ifdef CONFIG_IPV6_SEG6_HMAC
752	if (srh && !seg6_hmac_validate_skb(skb))
753		goto drop;
754#endif
755	flvmask = finfo->flv_ops;
756	if (unlikely(flvmask & ~SEG6_LOCAL_FLV8986_SUPP_OPS)) {
757		pr_warn_once("seg6local: invalid RFC8986 flavors\n");
758		goto drop;
759	}
760
761	/* retrieve the action triggered by the combination of pktinfo data and
762	 * the flavors mask.
763	 */
764	action = seg6_local_flv8986_act_lookup(pinfo, flvmask);
765	switch (action) {
766	case SEG6_LOCAL_FLV_ACT_END:
767		/* process the packet as the "standard" End behavior */
768		advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
769		break;
770	case SEG6_LOCAL_FLV_ACT_PSP:
771		advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
772
773		if (unlikely(!seg6_pop_srh(skb, srhoff)))
774			goto drop;
775		break;
776	case SEG6_LOCAL_FLV_ACT_UNSPEC:
777		fallthrough;
778	default:
779		/* by default, we drop the packet since we could not find a
780		 * suitable action.
781		 */
782		goto drop;
783	}
784
785	return input_action_end_finish(skb, slwt);
786
787drop:
788	kfree_skb(skb);
789	return -EINVAL;
790}
791
792/* regular endpoint function */
793static int input_action_end(struct sk_buff *skb, struct seg6_local_lwt *slwt)
794{
795	const struct seg6_flavors_info *finfo = &slwt->flv_info;
796	__u32 fops = finfo->flv_ops;
797
798	if (!fops)
799		return input_action_end_core(skb, slwt);
800
801	/* check for the presence of NEXT-C-SID since it applies first */
802	if (seg6_next_csid_enabled(fops))
803		return end_next_csid_core(skb, slwt);
804
805	/* the specific processing function to be performed on the packet
806	 * depends on the combination of flavors defined in RFC8986 and some
807	 * information extracted from the packet, e.g. presence/absence of SRH,
808	 * Segment Left = 0, etc.
809	 */
810	return end_flv8986_core(skb, slwt);
811}
812
813/* regular endpoint, and forward to specified nexthop */
814static int input_action_end_x(struct sk_buff *skb, struct seg6_local_lwt *slwt)
815{
816	const struct seg6_flavors_info *finfo = &slwt->flv_info;
817	__u32 fops = finfo->flv_ops;
818
819	/* check for the presence of NEXT-C-SID since it applies first */
820	if (seg6_next_csid_enabled(fops))
821		return end_x_next_csid_core(skb, slwt);
822
823	return input_action_end_x_core(skb, slwt);
824}
825
826static int input_action_end_t(struct sk_buff *skb, struct seg6_local_lwt *slwt)
827{
828	struct ipv6_sr_hdr *srh;
829
830	srh = get_and_validate_srh(skb);
831	if (!srh)
832		goto drop;
833
834	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
835
836	seg6_lookup_nexthop(skb, NULL, slwt->table);
837
838	return dst_input(skb);
839
840drop:
841	kfree_skb(skb);
842	return -EINVAL;
843}
844
845/* decapsulate and forward inner L2 frame on specified interface */
846static int input_action_end_dx2(struct sk_buff *skb,
847				struct seg6_local_lwt *slwt)
848{
849	struct net *net = dev_net(skb->dev);
850	struct net_device *odev;
851	struct ethhdr *eth;
852
853	if (!decap_and_validate(skb, IPPROTO_ETHERNET))
854		goto drop;
855
856	if (!pskb_may_pull(skb, ETH_HLEN))
857		goto drop;
858
859	skb_reset_mac_header(skb);
860	eth = (struct ethhdr *)skb->data;
861
862	/* To determine the frame's protocol, we assume it is 802.3. This avoids
863	 * a call to eth_type_trans(), which is not really relevant for our
864	 * use case.
865	 */
866	if (!eth_proto_is_802_3(eth->h_proto))
867		goto drop;
868
869	odev = dev_get_by_index_rcu(net, slwt->oif);
870	if (!odev)
871		goto drop;
872
873	/* As we accept Ethernet frames, make sure the egress device is of
874	 * the correct type.
875	 */
876	if (odev->type != ARPHRD_ETHER)
877		goto drop;
878
879	if (!(odev->flags & IFF_UP) || !netif_carrier_ok(odev))
880		goto drop;
881
882	skb_orphan(skb);
883
884	if (skb_warn_if_lro(skb))
885		goto drop;
886
887	skb_forward_csum(skb);
888
889	if (skb->len - ETH_HLEN > odev->mtu)
890		goto drop;
891
892	skb->dev = odev;
893	skb->protocol = eth->h_proto;
894
895	return dev_queue_xmit(skb);
896
897drop:
898	kfree_skb(skb);
899	return -EINVAL;
900}
901
902static int input_action_end_dx6_finish(struct net *net, struct sock *sk,
903				       struct sk_buff *skb)
904{
905	struct dst_entry *orig_dst = skb_dst(skb);
906	struct in6_addr *nhaddr = NULL;
907	struct seg6_local_lwt *slwt;
908
909	slwt = seg6_local_lwtunnel(orig_dst->lwtstate);
910
	/* The inner packet is not associated with any local interface,
	 * so we do not call netif_rx().
	 *
	 * If slwt->nh6 is set to ::, then look up the nexthop for the
	 * inner packet's DA. Otherwise, use the specified nexthop.
	 */
917	if (!ipv6_addr_any(&slwt->nh6))
918		nhaddr = &slwt->nh6;
919
920	seg6_lookup_nexthop(skb, nhaddr, 0);
921
922	return dst_input(skb);
923}
924
925/* decapsulate and forward to specified nexthop */
926static int input_action_end_dx6(struct sk_buff *skb,
927				struct seg6_local_lwt *slwt)
928{
929	/* this function accepts IPv6 encapsulated packets, with either
930	 * an SRH with SL=0, or no SRH.
931	 */
932
933	if (!decap_and_validate(skb, IPPROTO_IPV6))
934		goto drop;
935
936	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
937		goto drop;
938
939	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
940	nf_reset_ct(skb);
941
942	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
943		return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
944			       dev_net(skb->dev), NULL, skb, NULL,
945			       skb_dst(skb)->dev, input_action_end_dx6_finish);
946
947	return input_action_end_dx6_finish(dev_net(skb->dev), NULL, skb);
948drop:
949	kfree_skb(skb);
950	return -EINVAL;
951}
952
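/* Decapsulated IPv4 packet: route it towards the configured nexthop
 * (slwt->nh4) or, if no nexthop was configured, towards the inner packet's
 * destination address.
 */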
953static int input_action_end_dx4_finish(struct net *net, struct sock *sk,
954				       struct sk_buff *skb)
955{
956	struct dst_entry *orig_dst = skb_dst(skb);
957	struct seg6_local_lwt *slwt;
958	struct iphdr *iph;
959	__be32 nhaddr;
960	int err;
961
962	slwt = seg6_local_lwtunnel(orig_dst->lwtstate);
963
964	iph = ip_hdr(skb);
965
966	nhaddr = slwt->nh4.s_addr ?: iph->daddr;
967
968	skb_dst_drop(skb);
969
970	err = ip_route_input(skb, nhaddr, iph->saddr, 0, skb->dev);
971	if (err) {
972		kfree_skb(skb);
973		return -EINVAL;
974	}
975
976	return dst_input(skb);
977}
978
979static int input_action_end_dx4(struct sk_buff *skb,
980				struct seg6_local_lwt *slwt)
981{
982	if (!decap_and_validate(skb, IPPROTO_IPIP))
983		goto drop;
984
985	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
986		goto drop;
987
988	skb->protocol = htons(ETH_P_IP);
989	skb_set_transport_header(skb, sizeof(struct iphdr));
990	nf_reset_ct(skb);
991
992	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
993		return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
994			       dev_net(skb->dev), NULL, skb, NULL,
995			       skb_dst(skb)->dev, input_action_end_dx4_finish);
996
997	return input_action_end_dx4_finish(dev_net(skb->dev), NULL, skb);
998drop:
999	kfree_skb(skb);
1000	return -EINVAL;
1001}
1002
1003#ifdef CONFIG_NET_L3_MASTER_DEV
1004static struct net *fib6_config_get_net(const struct fib6_config *fib6_cfg)
1005{
1006	const struct nl_info *nli = &fib6_cfg->fc_nlinfo;
1007
1008	return nli->nl_net;
1009}
1010
1011static int __seg6_end_dt_vrf_build(struct seg6_local_lwt *slwt, const void *cfg,
1012				   u16 family, struct netlink_ext_ack *extack)
1013{
1014	struct seg6_end_dt_info *info = &slwt->dt_info;
1015	int vrf_ifindex;
1016	struct net *net;
1017
1018	net = fib6_config_get_net(cfg);
1019
1020	/* note that vrf_table was already set by parse_nla_vrftable() */
1021	vrf_ifindex = l3mdev_ifindex_lookup_by_table_id(L3MDEV_TYPE_VRF, net,
1022							info->vrf_table);
1023	if (vrf_ifindex < 0) {
1024		if (vrf_ifindex == -EPERM) {
1025			NL_SET_ERR_MSG(extack,
1026				       "Strict mode for VRF is disabled");
1027		} else if (vrf_ifindex == -ENODEV) {
1028			NL_SET_ERR_MSG(extack,
1029				       "Table has no associated VRF device");
1030		} else {
1031			pr_debug("seg6local: SRv6 End.DT* creation error=%d\n",
1032				 vrf_ifindex);
1033		}
1034
1035		return vrf_ifindex;
1036	}
1037
1038	info->net = net;
1039	info->vrf_ifindex = vrf_ifindex;
1040
1041	info->family = family;
1042	info->mode = DT_VRF_MODE;
1043
1044	return 0;
1045}
1046
1047/* The SRv6 End.DT4/DT6 behavior extracts the inner (IPv4/IPv6) packet and
1048 * routes the IPv4/IPv6 packet by looking at the configured routing table.
1049 *
1050 * In the SRv6 End.DT4/DT6 use case, we can receive traffic (IPv6+Segment
1051 * Routing Header packets) from several interfaces and the outer IPv6
1052 * destination address (DA) is used for retrieving the specific instance of the
1053 * End.DT4/DT6 behavior that should process the packets.
1054 *
1055 * However, the inner IPv4/IPv6 packet is not really bound to any receiving
1056 * interface and thus the End.DT4/DT6 sets the VRF (associated with the
1057 * corresponding routing table) as the *receiving* interface.
 * In other words, the End.DT4/DT6 processes a packet as if it had been
 * received directly by the VRF (and not by one of its slave devices, if any).
 * In this way, the VRF interface is used for routing the IPv4/IPv6 packet
 * according to the routing table configured by the End.DT4/DT6 instance.
1062 *
 * This design provides some interesting features, such as:
1064 *  1) the statistics on rx packets;
1065 *  2) the possibility to install a packet sniffer on the receiving interface
1066 *     (the VRF one) for looking at the incoming packets;
1067 *  3) the possibility to leverage the netfilter prerouting hook for the inner
1068 *     IPv4 packet.
1069 *
1070 * This function returns:
1071 *  - the sk_buff* when the VRF rcv handler has processed the packet correctly;
1072 *  - NULL when the skb is consumed by the VRF rcv handler;
1073 *  - a pointer which encodes a negative error number in case of error.
1074 *    Note that in this case, the function takes care of freeing the skb.
1075 */
1076static struct sk_buff *end_dt_vrf_rcv(struct sk_buff *skb, u16 family,
1077				      struct net_device *dev)
1078{
1079	/* based on l3mdev_ip_rcv; we are only interested in the master */
1080	if (unlikely(!netif_is_l3_master(dev) && !netif_has_l3_rx_handler(dev)))
1081		goto drop;
1082
1083	if (unlikely(!dev->l3mdev_ops->l3mdev_l3_rcv))
1084		goto drop;
1085
	/* the decapsulated IPv4/IPv6 packet does not carry any MAC header
	 * info. We must unset the MAC header to allow the VRF device to
	 * rebuild it, in case a sniffer is attached to the device.
	 */
1090	skb_unset_mac_header(skb);
1091
1092	skb = dev->l3mdev_ops->l3mdev_l3_rcv(dev, skb, family);
1093	if (!skb)
1094		/* the skb buffer was consumed by the handler */
1095		return NULL;
1096
1097	/* when a packet is received by a VRF or by one of its slaves, the
1098	 * master device reference is set into the skb.
1099	 */
1100	if (unlikely(skb->dev != dev || skb->skb_iif != dev->ifindex))
1101		goto drop;
1102
1103	return skb;
1104
1105drop:
1106	kfree_skb(skb);
1107	return ERR_PTR(-EINVAL);
1108}
1109
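/* Retrieve (under RCU) the VRF device that was bound to the behavior at
 * creation time. The lookup fails if the recorded ifindex is invalid or if
 * the packet was received in a different network namespace.
 */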
1110static struct net_device *end_dt_get_vrf_rcu(struct sk_buff *skb,
1111					     struct seg6_end_dt_info *info)
1112{
1113	int vrf_ifindex = info->vrf_ifindex;
1114	struct net *net = info->net;
1115
1116	if (unlikely(vrf_ifindex < 0))
1117		goto error;
1118
1119	if (unlikely(!net_eq(dev_net(skb->dev), net)))
1120		goto error;
1121
1122	return dev_get_by_index_rcu(net, vrf_ifindex);
1123
1124error:
1125	return NULL;
1126}
1127
1128static struct sk_buff *end_dt_vrf_core(struct sk_buff *skb,
1129				       struct seg6_local_lwt *slwt, u16 family)
1130{
1131	struct seg6_end_dt_info *info = &slwt->dt_info;
1132	struct net_device *vrf;
1133	__be16 protocol;
1134	int hdrlen;
1135
1136	vrf = end_dt_get_vrf_rcu(skb, info);
1137	if (unlikely(!vrf))
1138		goto drop;
1139
1140	switch (family) {
1141	case AF_INET:
1142		protocol = htons(ETH_P_IP);
1143		hdrlen = sizeof(struct iphdr);
1144		break;
1145	case AF_INET6:
1146		protocol = htons(ETH_P_IPV6);
1147		hdrlen = sizeof(struct ipv6hdr);
1148		break;
1149	case AF_UNSPEC:
1150		fallthrough;
1151	default:
1152		goto drop;
1153	}
1154
1155	if (unlikely(info->family != AF_UNSPEC && info->family != family)) {
1156		pr_warn_once("seg6local: SRv6 End.DT* family mismatch");
1157		goto drop;
1158	}
1159
1160	skb->protocol = protocol;
1161
1162	skb_dst_drop(skb);
1163
1164	skb_set_transport_header(skb, hdrlen);
1165	nf_reset_ct(skb);
1166
1167	return end_dt_vrf_rcv(skb, family, vrf);
1168
1169drop:
1170	kfree_skb(skb);
1171	return ERR_PTR(-EINVAL);
1172}
1173
1174static int input_action_end_dt4(struct sk_buff *skb,
1175				struct seg6_local_lwt *slwt)
1176{
1177	struct iphdr *iph;
1178	int err;
1179
1180	if (!decap_and_validate(skb, IPPROTO_IPIP))
1181		goto drop;
1182
1183	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
1184		goto drop;
1185
1186	skb = end_dt_vrf_core(skb, slwt, AF_INET);
1187	if (!skb)
1188		/* packet has been processed and consumed by the VRF */
1189		return 0;
1190
1191	if (IS_ERR(skb))
1192		return PTR_ERR(skb);
1193
1194	iph = ip_hdr(skb);
1195
1196	err = ip_route_input(skb, iph->daddr, iph->saddr, 0, skb->dev);
1197	if (unlikely(err))
1198		goto drop;
1199
1200	return dst_input(skb);
1201
1202drop:
1203	kfree_skb(skb);
1204	return -EINVAL;
1205}
1206
1207static int seg6_end_dt4_build(struct seg6_local_lwt *slwt, const void *cfg,
1208			      struct netlink_ext_ack *extack)
1209{
1210	return __seg6_end_dt_vrf_build(slwt, cfg, AF_INET, extack);
1211}
1212
1213static enum
1214seg6_end_dt_mode seg6_end_dt6_parse_mode(struct seg6_local_lwt *slwt)
1215{
1216	unsigned long parsed_optattrs = slwt->parsed_optattrs;
1217	bool legacy, vrfmode;
1218
1219	legacy	= !!(parsed_optattrs & SEG6_F_ATTR(SEG6_LOCAL_TABLE));
1220	vrfmode	= !!(parsed_optattrs & SEG6_F_ATTR(SEG6_LOCAL_VRFTABLE));
1221
1222	if (!(legacy ^ vrfmode))
		/* both absent or both present: invalid DT6 mode */
1224		return DT_INVALID_MODE;
1225
1226	return legacy ? DT_LEGACY_MODE : DT_VRF_MODE;
1227}
1228
1229static enum seg6_end_dt_mode seg6_end_dt6_get_mode(struct seg6_local_lwt *slwt)
1230{
1231	struct seg6_end_dt_info *info = &slwt->dt_info;
1232
1233	return info->mode;
1234}
1235
1236static int seg6_end_dt6_build(struct seg6_local_lwt *slwt, const void *cfg,
1237			      struct netlink_ext_ack *extack)
1238{
1239	enum seg6_end_dt_mode mode = seg6_end_dt6_parse_mode(slwt);
1240	struct seg6_end_dt_info *info = &slwt->dt_info;
1241
1242	switch (mode) {
1243	case DT_LEGACY_MODE:
1244		info->mode = DT_LEGACY_MODE;
1245		return 0;
1246	case DT_VRF_MODE:
1247		return __seg6_end_dt_vrf_build(slwt, cfg, AF_INET6, extack);
1248	default:
1249		NL_SET_ERR_MSG(extack, "table or vrftable must be specified");
1250		return -EINVAL;
1251	}
1252}
1253#endif
1254
1255static int input_action_end_dt6(struct sk_buff *skb,
1256				struct seg6_local_lwt *slwt)
1257{
1258	if (!decap_and_validate(skb, IPPROTO_IPV6))
1259		goto drop;
1260
1261	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
1262		goto drop;
1263
1264#ifdef CONFIG_NET_L3_MASTER_DEV
1265	if (seg6_end_dt6_get_mode(slwt) == DT_LEGACY_MODE)
1266		goto legacy_mode;
1267
1268	/* DT6_VRF_MODE */
1269	skb = end_dt_vrf_core(skb, slwt, AF_INET6);
1270	if (!skb)
1271		/* packet has been processed and consumed by the VRF */
1272		return 0;
1273
1274	if (IS_ERR(skb))
1275		return PTR_ERR(skb);
1276
1277	/* note: this time we do not need to specify the table because the VRF
1278	 * takes care of selecting the correct table.
1279	 */
1280	seg6_lookup_any_nexthop(skb, NULL, 0, true);
1281
1282	return dst_input(skb);
1283
1284legacy_mode:
1285#endif
1286	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
1287
1288	seg6_lookup_any_nexthop(skb, NULL, slwt->table, true);
1289
1290	return dst_input(skb);
1291
1292drop:
1293	kfree_skb(skb);
1294	return -EINVAL;
1295}
1296
1297#ifdef CONFIG_NET_L3_MASTER_DEV
1298static int seg6_end_dt46_build(struct seg6_local_lwt *slwt, const void *cfg,
1299			       struct netlink_ext_ack *extack)
1300{
1301	return __seg6_end_dt_vrf_build(slwt, cfg, AF_UNSPEC, extack);
1302}
1303
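/* End.DT46: dispatch to the End.DT4 or End.DT6 processing path depending on
 * the type (IPv4 or IPv6) of the inner encapsulated packet.
 */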
1304static int input_action_end_dt46(struct sk_buff *skb,
1305				 struct seg6_local_lwt *slwt)
1306{
1307	unsigned int off = 0;
1308	int nexthdr;
1309
1310	nexthdr = ipv6_find_hdr(skb, &off, -1, NULL, NULL);
1311	if (unlikely(nexthdr < 0))
1312		goto drop;
1313
1314	switch (nexthdr) {
1315	case IPPROTO_IPIP:
1316		return input_action_end_dt4(skb, slwt);
1317	case IPPROTO_IPV6:
1318		return input_action_end_dt6(skb, slwt);
1319	}
1320
1321drop:
1322	kfree_skb(skb);
1323	return -EINVAL;
1324}
1325#endif
1326
1327/* push an SRH on top of the current one */
1328static int input_action_end_b6(struct sk_buff *skb, struct seg6_local_lwt *slwt)
1329{
1330	struct ipv6_sr_hdr *srh;
1331	int err = -EINVAL;
1332
1333	srh = get_and_validate_srh(skb);
1334	if (!srh)
1335		goto drop;
1336
1337	err = seg6_do_srh_inline(skb, slwt->srh);
1338	if (err)
1339		goto drop;
1340
1341	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
1342
1343	seg6_lookup_nexthop(skb, NULL, 0);
1344
1345	return dst_input(skb);
1346
1347drop:
1348	kfree_skb(skb);
1349	return err;
1350}
1351
1352/* encapsulate within an outer IPv6 header and a specified SRH */
1353static int input_action_end_b6_encap(struct sk_buff *skb,
1354				     struct seg6_local_lwt *slwt)
1355{
1356	struct ipv6_sr_hdr *srh;
1357	int err = -EINVAL;
1358
1359	srh = get_and_validate_srh(skb);
1360	if (!srh)
1361		goto drop;
1362
1363	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
1364
1365	skb_reset_inner_headers(skb);
1366	skb->encapsulation = 1;
1367
1368	err = seg6_do_srh_encap(skb, slwt->srh, IPPROTO_IPV6);
1369	if (err)
1370		goto drop;
1371
1372	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
1373
1374	seg6_lookup_nexthop(skb, NULL, 0);
1375
1376	return dst_input(skb);
1377
1378drop:
1379	kfree_skb(skb);
1380	return err;
1381}
1382
1383DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);
1384
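/* Check whether the SRH tracked in the per-CPU seg6_bpf_srh_states is still
 * valid after a BPF program has run. If the state was invalidated (e.g. the
 * header was modified through a seg6 BPF helper), the SRH length must still
 * be 8-byte aligned and the header must pass seg6_validate_srh() again.
 */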
1385bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
1386{
1387	struct seg6_bpf_srh_state *srh_state =
1388		this_cpu_ptr(&seg6_bpf_srh_states);
1389	struct ipv6_sr_hdr *srh = srh_state->srh;
1390
1391	if (unlikely(srh == NULL))
1392		return false;
1393
1394	if (unlikely(!srh_state->valid)) {
1395		if ((srh_state->hdrlen & 7) != 0)
1396			return false;
1397
1398		srh->hdrlen = (u8)(srh_state->hdrlen >> 3);
1399		if (!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3, true))
1400			return false;
1401
1402		srh_state->valid = true;
1403	}
1404
1405	return true;
1406}
1407
1408static int input_action_end_bpf(struct sk_buff *skb,
1409				struct seg6_local_lwt *slwt)
1410{
1411	struct seg6_bpf_srh_state *srh_state =
1412		this_cpu_ptr(&seg6_bpf_srh_states);
1413	struct ipv6_sr_hdr *srh;
1414	int ret;
1415
1416	srh = get_and_validate_srh(skb);
1417	if (!srh) {
1418		kfree_skb(skb);
1419		return -EINVAL;
1420	}
1421	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
1422
1423	/* preempt_disable is needed to protect the per-CPU buffer srh_state,
1424	 * which is also accessed by the bpf_lwt_seg6_* helpers
1425	 */
1426	preempt_disable();
1427	srh_state->srh = srh;
1428	srh_state->hdrlen = srh->hdrlen << 3;
1429	srh_state->valid = true;
1430
1431	rcu_read_lock();
1432	bpf_compute_data_pointers(skb);
1433	ret = bpf_prog_run_save_cb(slwt->bpf.prog, skb);
1434	rcu_read_unlock();
1435
1436	switch (ret) {
1437	case BPF_OK:
1438	case BPF_REDIRECT:
1439		break;
1440	case BPF_DROP:
1441		goto drop;
1442	default:
1443		pr_warn_once("bpf-seg6local: Illegal return value %u\n", ret);
1444		goto drop;
1445	}
1446
1447	if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
1448		goto drop;
1449
1450	preempt_enable();
1451	if (ret != BPF_REDIRECT)
1452		seg6_lookup_nexthop(skb, NULL, 0);
1453
1454	return dst_input(skb);
1455
1456drop:
1457	preempt_enable();
1458	kfree_skb(skb);
1459	return -EINVAL;
1460}
1461
1462static struct seg6_action_desc seg6_action_table[] = {
1463	{
1464		.action		= SEG6_LOCAL_ACTION_END,
1465		.attrs		= 0,
1466		.optattrs	= SEG6_F_LOCAL_COUNTERS |
1467				  SEG6_F_LOCAL_FLAVORS,
1468		.input		= input_action_end,
1469	},
1470	{
1471		.action		= SEG6_LOCAL_ACTION_END_X,
1472		.attrs		= SEG6_F_ATTR(SEG6_LOCAL_NH6),
1473		.optattrs	= SEG6_F_LOCAL_COUNTERS |
1474				  SEG6_F_LOCAL_FLAVORS,
1475		.input		= input_action_end_x,
1476	},
1477	{
1478		.action		= SEG6_LOCAL_ACTION_END_T,
1479		.attrs		= SEG6_F_ATTR(SEG6_LOCAL_TABLE),
1480		.optattrs	= SEG6_F_LOCAL_COUNTERS,
1481		.input		= input_action_end_t,
1482	},
1483	{
1484		.action		= SEG6_LOCAL_ACTION_END_DX2,
1485		.attrs		= SEG6_F_ATTR(SEG6_LOCAL_OIF),
1486		.optattrs	= SEG6_F_LOCAL_COUNTERS,
1487		.input		= input_action_end_dx2,
1488	},
1489	{
1490		.action		= SEG6_LOCAL_ACTION_END_DX6,
1491		.attrs		= SEG6_F_ATTR(SEG6_LOCAL_NH6),
1492		.optattrs	= SEG6_F_LOCAL_COUNTERS,
1493		.input		= input_action_end_dx6,
1494	},
1495	{
1496		.action		= SEG6_LOCAL_ACTION_END_DX4,
1497		.attrs		= SEG6_F_ATTR(SEG6_LOCAL_NH4),
1498		.optattrs	= SEG6_F_LOCAL_COUNTERS,
1499		.input		= input_action_end_dx4,
1500	},
1501	{
1502		.action		= SEG6_LOCAL_ACTION_END_DT4,
1503		.attrs		= SEG6_F_ATTR(SEG6_LOCAL_VRFTABLE),
1504		.optattrs	= SEG6_F_LOCAL_COUNTERS,
1505#ifdef CONFIG_NET_L3_MASTER_DEV
1506		.input		= input_action_end_dt4,
1507		.slwt_ops	= {
1508					.build_state = seg6_end_dt4_build,
1509				  },
1510#endif
1511	},
1512	{
1513		.action		= SEG6_LOCAL_ACTION_END_DT6,
1514#ifdef CONFIG_NET_L3_MASTER_DEV
1515		.attrs		= 0,
1516		.optattrs	= SEG6_F_LOCAL_COUNTERS		|
1517				  SEG6_F_ATTR(SEG6_LOCAL_TABLE) |
1518				  SEG6_F_ATTR(SEG6_LOCAL_VRFTABLE),
1519		.slwt_ops	= {
1520					.build_state = seg6_end_dt6_build,
1521				  },
1522#else
1523		.attrs		= SEG6_F_ATTR(SEG6_LOCAL_TABLE),
1524		.optattrs	= SEG6_F_LOCAL_COUNTERS,
1525#endif
1526		.input		= input_action_end_dt6,
1527	},
1528	{
1529		.action		= SEG6_LOCAL_ACTION_END_DT46,
1530		.attrs		= SEG6_F_ATTR(SEG6_LOCAL_VRFTABLE),
1531		.optattrs	= SEG6_F_LOCAL_COUNTERS,
1532#ifdef CONFIG_NET_L3_MASTER_DEV
1533		.input		= input_action_end_dt46,
1534		.slwt_ops	= {
1535					.build_state = seg6_end_dt46_build,
1536				  },
1537#endif
1538	},
1539	{
1540		.action		= SEG6_LOCAL_ACTION_END_B6,
1541		.attrs		= SEG6_F_ATTR(SEG6_LOCAL_SRH),
1542		.optattrs	= SEG6_F_LOCAL_COUNTERS,
1543		.input		= input_action_end_b6,
1544	},
1545	{
1546		.action		= SEG6_LOCAL_ACTION_END_B6_ENCAP,
1547		.attrs		= SEG6_F_ATTR(SEG6_LOCAL_SRH),
1548		.optattrs	= SEG6_F_LOCAL_COUNTERS,
1549		.input		= input_action_end_b6_encap,
1550		.static_headroom	= sizeof(struct ipv6hdr),
1551	},
1552	{
1553		.action		= SEG6_LOCAL_ACTION_END_BPF,
1554		.attrs		= SEG6_F_ATTR(SEG6_LOCAL_BPF),
1555		.optattrs	= SEG6_F_LOCAL_COUNTERS,
1556		.input		= input_action_end_bpf,
1557	},
1558
1559};
1560
1561static struct seg6_action_desc *__get_action_desc(int action)
1562{
1563	struct seg6_action_desc *desc;
1564	int i, count;
1565
1566	count = ARRAY_SIZE(seg6_action_table);
1567	for (i = 0; i < count; i++) {
1568		desc = &seg6_action_table[i];
1569		if (desc->action == action)
1570			return desc;
1571	}
1572
1573	return NULL;
1574}
1575
1576static bool seg6_lwtunnel_counters_enabled(struct seg6_local_lwt *slwt)
1577{
1578	return slwt->parsed_optattrs & SEG6_F_LOCAL_COUNTERS;
1579}
1580
1581static void seg6_local_update_counters(struct seg6_local_lwt *slwt,
1582				       unsigned int len, int err)
1583{
1584	struct pcpu_seg6_local_counters *pcounters;
1585
1586	pcounters = this_cpu_ptr(slwt->pcpu_counters);
1587	u64_stats_update_begin(&pcounters->syncp);
1588
1589	if (likely(!err)) {
1590		u64_stats_inc(&pcounters->packets);
1591		u64_stats_add(&pcounters->bytes, len);
1592	} else {
1593		u64_stats_inc(&pcounters->errors);
1594	}
1595
1596	u64_stats_update_end(&pcounters->syncp);
1597}
1598
1599static int seg6_local_input_core(struct net *net, struct sock *sk,
1600				 struct sk_buff *skb)
1601{
1602	struct dst_entry *orig_dst = skb_dst(skb);
1603	struct seg6_action_desc *desc;
1604	struct seg6_local_lwt *slwt;
1605	unsigned int len = skb->len;
1606	int rc;
1607
1608	slwt = seg6_local_lwtunnel(orig_dst->lwtstate);
1609	desc = slwt->desc;
1610
1611	rc = desc->input(skb, slwt);
1612
1613	if (!seg6_lwtunnel_counters_enabled(slwt))
1614		return rc;
1615
1616	seg6_local_update_counters(slwt, len, rc);
1617
1618	return rc;
1619}
1620
1621static int seg6_local_input(struct sk_buff *skb)
1622{
1623	if (skb->protocol != htons(ETH_P_IPV6)) {
1624		kfree_skb(skb);
1625		return -EINVAL;
1626	}
1627
1628	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
1629		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN,
1630			       dev_net(skb->dev), NULL, skb, skb->dev, NULL,
1631			       seg6_local_input_core);
1632
1633	return seg6_local_input_core(dev_net(skb->dev), NULL, skb);
1634}
1635
1636static const struct nla_policy seg6_local_policy[SEG6_LOCAL_MAX + 1] = {
1637	[SEG6_LOCAL_ACTION]	= { .type = NLA_U32 },
1638	[SEG6_LOCAL_SRH]	= { .type = NLA_BINARY },
1639	[SEG6_LOCAL_TABLE]	= { .type = NLA_U32 },
1640	[SEG6_LOCAL_VRFTABLE]	= { .type = NLA_U32 },
1641	[SEG6_LOCAL_NH4]	= { .type = NLA_BINARY,
1642				    .len = sizeof(struct in_addr) },
1643	[SEG6_LOCAL_NH6]	= { .type = NLA_BINARY,
1644				    .len = sizeof(struct in6_addr) },
1645	[SEG6_LOCAL_IIF]	= { .type = NLA_U32 },
1646	[SEG6_LOCAL_OIF]	= { .type = NLA_U32 },
1647	[SEG6_LOCAL_BPF]	= { .type = NLA_NESTED },
1648	[SEG6_LOCAL_COUNTERS]	= { .type = NLA_NESTED },
1649	[SEG6_LOCAL_FLAVORS]	= { .type = NLA_NESTED },
1650};
1651
1652static int parse_nla_srh(struct nlattr **attrs, struct seg6_local_lwt *slwt,
1653			 struct netlink_ext_ack *extack)
1654{
1655	struct ipv6_sr_hdr *srh;
1656	int len;
1657
1658	srh = nla_data(attrs[SEG6_LOCAL_SRH]);
1659	len = nla_len(attrs[SEG6_LOCAL_SRH]);
1660
1661	/* SRH must contain at least one segment */
1662	if (len < sizeof(*srh) + sizeof(struct in6_addr))
1663		return -EINVAL;
1664
1665	if (!seg6_validate_srh(srh, len, false))
1666		return -EINVAL;
1667
1668	slwt->srh = kmemdup(srh, len, GFP_KERNEL);
1669	if (!slwt->srh)
1670		return -ENOMEM;
1671
1672	slwt->headroom += len;
1673
1674	return 0;
1675}
1676
1677static int put_nla_srh(struct sk_buff *skb, struct seg6_local_lwt *slwt)
1678{
1679	struct ipv6_sr_hdr *srh;
1680	struct nlattr *nla;
1681	int len;
1682
1683	srh = slwt->srh;
1684	len = (srh->hdrlen + 1) << 3;
1685
1686	nla = nla_reserve(skb, SEG6_LOCAL_SRH, len);
1687	if (!nla)
1688		return -EMSGSIZE;
1689
1690	memcpy(nla_data(nla), srh, len);
1691
1692	return 0;
1693}
1694
1695static int cmp_nla_srh(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
1696{
1697	int len = (a->srh->hdrlen + 1) << 3;
1698
1699	if (len != ((b->srh->hdrlen + 1) << 3))
1700		return 1;
1701
1702	return memcmp(a->srh, b->srh, len);
1703}
1704
1705static void destroy_attr_srh(struct seg6_local_lwt *slwt)
1706{
1707	kfree(slwt->srh);
1708}
1709
1710static int parse_nla_table(struct nlattr **attrs, struct seg6_local_lwt *slwt,
1711			   struct netlink_ext_ack *extack)
1712{
1713	slwt->table = nla_get_u32(attrs[SEG6_LOCAL_TABLE]);
1714
1715	return 0;
1716}
1717
1718static int put_nla_table(struct sk_buff *skb, struct seg6_local_lwt *slwt)
1719{
1720	if (nla_put_u32(skb, SEG6_LOCAL_TABLE, slwt->table))
1721		return -EMSGSIZE;
1722
1723	return 0;
1724}
1725
1726static int cmp_nla_table(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
1727{
1728	if (a->table != b->table)
1729		return 1;
1730
1731	return 0;
1732}
1733
1734static struct
1735seg6_end_dt_info *seg6_possible_end_dt_info(struct seg6_local_lwt *slwt)
1736{
1737#ifdef CONFIG_NET_L3_MASTER_DEV
1738	return &slwt->dt_info;
1739#else
1740	return ERR_PTR(-EOPNOTSUPP);
1741#endif
1742}
1743
1744static int parse_nla_vrftable(struct nlattr **attrs,
1745			      struct seg6_local_lwt *slwt,
1746			      struct netlink_ext_ack *extack)
1747{
1748	struct seg6_end_dt_info *info = seg6_possible_end_dt_info(slwt);
1749
1750	if (IS_ERR(info))
1751		return PTR_ERR(info);
1752
1753	info->vrf_table = nla_get_u32(attrs[SEG6_LOCAL_VRFTABLE]);
1754
1755	return 0;
1756}
1757
1758static int put_nla_vrftable(struct sk_buff *skb, struct seg6_local_lwt *slwt)
1759{
1760	struct seg6_end_dt_info *info = seg6_possible_end_dt_info(slwt);
1761
1762	if (IS_ERR(info))
1763		return PTR_ERR(info);
1764
1765	if (nla_put_u32(skb, SEG6_LOCAL_VRFTABLE, info->vrf_table))
1766		return -EMSGSIZE;
1767
1768	return 0;
1769}
1770
1771static int cmp_nla_vrftable(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
1772{
1773	struct seg6_end_dt_info *info_a = seg6_possible_end_dt_info(a);
1774	struct seg6_end_dt_info *info_b = seg6_possible_end_dt_info(b);
1775
1776	if (info_a->vrf_table != info_b->vrf_table)
1777		return 1;
1778
1779	return 0;
1780}
1781
1782static int parse_nla_nh4(struct nlattr **attrs, struct seg6_local_lwt *slwt,
1783			 struct netlink_ext_ack *extack)
1784{
1785	memcpy(&slwt->nh4, nla_data(attrs[SEG6_LOCAL_NH4]),
1786	       sizeof(struct in_addr));
1787
1788	return 0;
1789}
1790
1791static int put_nla_nh4(struct sk_buff *skb, struct seg6_local_lwt *slwt)
1792{
1793	struct nlattr *nla;
1794
1795	nla = nla_reserve(skb, SEG6_LOCAL_NH4, sizeof(struct in_addr));
1796	if (!nla)
1797		return -EMSGSIZE;
1798
1799	memcpy(nla_data(nla), &slwt->nh4, sizeof(struct in_addr));
1800
1801	return 0;
1802}
1803
1804static int cmp_nla_nh4(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
1805{
1806	return memcmp(&a->nh4, &b->nh4, sizeof(struct in_addr));
1807}
1808
1809static int parse_nla_nh6(struct nlattr **attrs, struct seg6_local_lwt *slwt,
1810			 struct netlink_ext_ack *extack)
1811{
1812	memcpy(&slwt->nh6, nla_data(attrs[SEG6_LOCAL_NH6]),
1813	       sizeof(struct in6_addr));
1814
1815	return 0;
1816}
1817
1818static int put_nla_nh6(struct sk_buff *skb, struct seg6_local_lwt *slwt)
1819{
1820	struct nlattr *nla;
1821
1822	nla = nla_reserve(skb, SEG6_LOCAL_NH6, sizeof(struct in6_addr));
1823	if (!nla)
1824		return -EMSGSIZE;
1825
1826	memcpy(nla_data(nla), &slwt->nh6, sizeof(struct in6_addr));
1827
1828	return 0;
1829}
1830
1831static int cmp_nla_nh6(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
1832{
1833	return memcmp(&a->nh6, &b->nh6, sizeof(struct in6_addr));
1834}
1835
1836static int parse_nla_iif(struct nlattr **attrs, struct seg6_local_lwt *slwt,
1837			 struct netlink_ext_ack *extack)
1838{
1839	slwt->iif = nla_get_u32(attrs[SEG6_LOCAL_IIF]);
1840
1841	return 0;
1842}
1843
1844static int put_nla_iif(struct sk_buff *skb, struct seg6_local_lwt *slwt)
1845{
1846	if (nla_put_u32(skb, SEG6_LOCAL_IIF, slwt->iif))
1847		return -EMSGSIZE;
1848
1849	return 0;
1850}
1851
1852static int cmp_nla_iif(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
1853{
1854	if (a->iif != b->iif)
1855		return 1;
1856
1857	return 0;
1858}
1859
1860static int parse_nla_oif(struct nlattr **attrs, struct seg6_local_lwt *slwt,
1861			 struct netlink_ext_ack *extack)
1862{
1863	slwt->oif = nla_get_u32(attrs[SEG6_LOCAL_OIF]);
1864
1865	return 0;
1866}
1867
1868static int put_nla_oif(struct sk_buff *skb, struct seg6_local_lwt *slwt)
1869{
1870	if (nla_put_u32(skb, SEG6_LOCAL_OIF, slwt->oif))
1871		return -EMSGSIZE;
1872
1873	return 0;
1874}
1875
1876static int cmp_nla_oif(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
1877{
1878	if (a->oif != b->oif)
1879		return 1;
1880
1881	return 0;
1882}
1883
1884#define MAX_PROG_NAME 256
1885static const struct nla_policy bpf_prog_policy[SEG6_LOCAL_BPF_PROG_MAX + 1] = {
1886	[SEG6_LOCAL_BPF_PROG]	   = { .type = NLA_U32, },
1887	[SEG6_LOCAL_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
1888				       .len = MAX_PROG_NAME },
1889};
1890
1891static int parse_nla_bpf(struct nlattr **attrs, struct seg6_local_lwt *slwt,
1892			 struct netlink_ext_ack *extack)
1893{
1894	struct nlattr *tb[SEG6_LOCAL_BPF_PROG_MAX + 1];
1895	struct bpf_prog *p;
1896	int ret;
1897	u32 fd;
1898
1899	ret = nla_parse_nested_deprecated(tb, SEG6_LOCAL_BPF_PROG_MAX,
1900					  attrs[SEG6_LOCAL_BPF],
1901					  bpf_prog_policy, NULL);
1902	if (ret < 0)
1903		return ret;
1904
1905	if (!tb[SEG6_LOCAL_BPF_PROG] || !tb[SEG6_LOCAL_BPF_PROG_NAME])
1906		return -EINVAL;
1907
1908	slwt->bpf.name = nla_memdup(tb[SEG6_LOCAL_BPF_PROG_NAME], GFP_KERNEL);
1909	if (!slwt->bpf.name)
1910		return -ENOMEM;
1911
1912	fd = nla_get_u32(tb[SEG6_LOCAL_BPF_PROG]);
1913	p = bpf_prog_get_type(fd, BPF_PROG_TYPE_LWT_SEG6LOCAL);
1914	if (IS_ERR(p)) {
1915		kfree(slwt->bpf.name);
1916		return PTR_ERR(p);
1917	}
1918
1919	slwt->bpf.prog = p;
1920	return 0;
1921}
1922
1923static int put_nla_bpf(struct sk_buff *skb, struct seg6_local_lwt *slwt)
1924{
1925	struct nlattr *nest;
1926
1927	if (!slwt->bpf.prog)
1928		return 0;
1929
1930	nest = nla_nest_start_noflag(skb, SEG6_LOCAL_BPF);
1931	if (!nest)
1932		return -EMSGSIZE;
1933
1934	if (nla_put_u32(skb, SEG6_LOCAL_BPF_PROG, slwt->bpf.prog->aux->id))
1935		return -EMSGSIZE;
1936
1937	if (slwt->bpf.name &&
1938	    nla_put_string(skb, SEG6_LOCAL_BPF_PROG_NAME, slwt->bpf.name))
1939		return -EMSGSIZE;
1940
1941	return nla_nest_end(skb, nest);
1942}
1943
1944static int cmp_nla_bpf(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
1945{
1946	if (!a->bpf.name && !b->bpf.name)
1947		return 0;
1948
1949	if (!a->bpf.name || !b->bpf.name)
1950		return 1;
1951
1952	return strcmp(a->bpf.name, b->bpf.name);
1953}
1954
1955static void destroy_attr_bpf(struct seg6_local_lwt *slwt)
1956{
1957	kfree(slwt->bpf.name);
1958	if (slwt->bpf.prog)
1959		bpf_prog_put(slwt->bpf.prog);
1960}
1961
1962static const struct
1963nla_policy seg6_local_counters_policy[SEG6_LOCAL_CNT_MAX + 1] = {
1964	[SEG6_LOCAL_CNT_PACKETS]	= { .type = NLA_U64 },
1965	[SEG6_LOCAL_CNT_BYTES]		= { .type = NLA_U64 },
1966	[SEG6_LOCAL_CNT_ERRORS]		= { .type = NLA_U64 },
1967};
1968
1969static int parse_nla_counters(struct nlattr **attrs,
1970			      struct seg6_local_lwt *slwt,
1971			      struct netlink_ext_ack *extack)
1972{
1973	struct pcpu_seg6_local_counters __percpu *pcounters;
1974	struct nlattr *tb[SEG6_LOCAL_CNT_MAX + 1];
1975	int ret;
1976
1977	ret = nla_parse_nested_deprecated(tb, SEG6_LOCAL_CNT_MAX,
1978					  attrs[SEG6_LOCAL_COUNTERS],
1979					  seg6_local_counters_policy, NULL);
1980	if (ret < 0)
1981		return ret;
1982
1983	/* basic support for SRv6 Behavior counters requires at least:
1984	 * packets, bytes and errors.
1985	 */
1986	if (!tb[SEG6_LOCAL_CNT_PACKETS] || !tb[SEG6_LOCAL_CNT_BYTES] ||
1987	    !tb[SEG6_LOCAL_CNT_ERRORS])
1988		return -EINVAL;
1989
1990	/* counters are always zero initialized */
1991	pcounters = seg6_local_alloc_pcpu_counters(GFP_KERNEL);
1992	if (!pcounters)
1993		return -ENOMEM;
1994
1995	slwt->pcpu_counters = pcounters;
1996
1997	return 0;
1998}
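
/* Illustrative only: user space requests per-behavior counters by including
 * the nested SEG6_LOCAL_COUNTERS attribute with all three of
 * SEG6_LOCAL_CNT_PACKETS, SEG6_LOCAL_CNT_BYTES and SEG6_LOCAL_CNT_ERRORS;
 * their values are ignored on creation since counters always start from zero.
 * With a recent enough iproute2 this maps to the "count" keyword, e.g.
 * (syntax may vary across versions):
 *
 *   ip -6 route add fc00::2/128 encap seg6local action End count dev eth0
 */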
1999
2000static int seg6_local_fill_nla_counters(struct sk_buff *skb,
2001					struct seg6_local_counters *counters)
2002{
2003	if (nla_put_u64_64bit(skb, SEG6_LOCAL_CNT_PACKETS, counters->packets,
2004			      SEG6_LOCAL_CNT_PAD))
2005		return -EMSGSIZE;
2006
2007	if (nla_put_u64_64bit(skb, SEG6_LOCAL_CNT_BYTES, counters->bytes,
2008			      SEG6_LOCAL_CNT_PAD))
2009		return -EMSGSIZE;
2010
2011	if (nla_put_u64_64bit(skb, SEG6_LOCAL_CNT_ERRORS, counters->errors,
2012			      SEG6_LOCAL_CNT_PAD))
2013		return -EMSGSIZE;
2014
2015	return 0;
2016}
2017
2018static int put_nla_counters(struct sk_buff *skb, struct seg6_local_lwt *slwt)
2019{
2020	struct seg6_local_counters counters = { 0, 0, 0 };
2021	struct nlattr *nest;
2022	int rc, i;
2023
2024	nest = nla_nest_start(skb, SEG6_LOCAL_COUNTERS);
2025	if (!nest)
2026		return -EMSGSIZE;
2027
2028	for_each_possible_cpu(i) {
2029		struct pcpu_seg6_local_counters *pcounters;
2030		u64 packets, bytes, errors;
2031		unsigned int start;
2032
2033		pcounters = per_cpu_ptr(slwt->pcpu_counters, i);
2034		do {
2035			start = u64_stats_fetch_begin(&pcounters->syncp);
2036
2037			packets = u64_stats_read(&pcounters->packets);
2038			bytes = u64_stats_read(&pcounters->bytes);
2039			errors = u64_stats_read(&pcounters->errors);
2040
2041		} while (u64_stats_fetch_retry(&pcounters->syncp, start));
2042
2043		counters.packets += packets;
2044		counters.bytes += bytes;
2045		counters.errors += errors;
2046	}
2047
2048	rc = seg6_local_fill_nla_counters(skb, &counters);
2049	if (rc < 0) {
2050		nla_nest_cancel(skb, nest);
2051		return rc;
2052	}
2053
2054	return nla_nest_end(skb, nest);
2055}
2056
2057static int cmp_nla_counters(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
2058{
	/* a and b are considered equal if both have pcpu_counters set, or
	 * both have it unset
	 */
2060	return (!!((unsigned long)a->pcpu_counters)) ^
2061		(!!((unsigned long)b->pcpu_counters));
2062}
2063
2064static void destroy_attr_counters(struct seg6_local_lwt *slwt)
2065{
2066	free_percpu(slwt->pcpu_counters);
2067}
2068
2069static const
2070struct nla_policy seg6_local_flavors_policy[SEG6_LOCAL_FLV_MAX + 1] = {
2071	[SEG6_LOCAL_FLV_OPERATION]	= { .type = NLA_U32 },
2072	[SEG6_LOCAL_FLV_LCBLOCK_BITS]	= { .type = NLA_U8 },
2073	[SEG6_LOCAL_FLV_LCNODE_FN_BITS]	= { .type = NLA_U8 },
2074};
2075
2076/* check whether the lengths of the Locator-Block and Locator-Node Function
2077 * are compatible with the dimension of a C-SID container.
2078 */
2079static int seg6_chk_next_csid_cfg(__u8 block_len, __u8 func_len)
2080{
	/* The sum of the Locator-Block and Locator-Node Function lengths
	 * cannot exceed 128 bits (i.e. the length of a C-SID container).
	 */
2084	if (next_csid_chk_cntr_bits(block_len, func_len))
2085		return -EINVAL;
2086
2087	/* Locator-Block length must be greater than zero and evenly divisible
2088	 * by 8. There must be room for a Locator-Node Function, at least.
2089	 */
2090	if (next_csid_chk_lcblock_bits(block_len))
2091		return -EINVAL;
2092
2093	/* Locator-Node Function length must be greater than zero and evenly
2094	 * divisible by 8. There must be room for the Locator-Block.
2095	 */
2096	if (next_csid_chk_lcnode_fn_bits(func_len))
2097		return -EINVAL;
2098
2099	return 0;
2100}
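
/* For example, the default lengths (block_len = 32, func_len = 16) pass all
 * of the checks above: both are non-zero, both are byte-aligned (multiples
 * of 8) and their sum does not exceed the 128 bits of a C-SID container.
 * Conversely, block_len = 30 is rejected because it is not byte-aligned, and
 * block_len = 120 with func_len = 16 is rejected because 120 + 16 > 128.
 */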
2101
2102static int seg6_parse_nla_next_csid_cfg(struct nlattr **tb,
2103					struct seg6_flavors_info *finfo,
2104					struct netlink_ext_ack *extack)
2105{
2106	__u8 func_len = SEG6_LOCAL_LCNODE_FN_DBITS;
2107	__u8 block_len = SEG6_LOCAL_LCBLOCK_DBITS;
2108	int rc;
2109
2110	if (tb[SEG6_LOCAL_FLV_LCBLOCK_BITS])
2111		block_len = nla_get_u8(tb[SEG6_LOCAL_FLV_LCBLOCK_BITS]);
2112
2113	if (tb[SEG6_LOCAL_FLV_LCNODE_FN_BITS])
2114		func_len = nla_get_u8(tb[SEG6_LOCAL_FLV_LCNODE_FN_BITS]);
2115
2116	rc = seg6_chk_next_csid_cfg(block_len, func_len);
2117	if (rc < 0) {
2118		NL_SET_ERR_MSG(extack,
2119			       "Invalid Locator Block/Node Function lengths");
2120		return rc;
2121	}
2122
2123	finfo->lcblock_bits = block_len;
2124	finfo->lcnode_func_bits = func_len;
2125
2126	return 0;
2127}
2128
2129static int parse_nla_flavors(struct nlattr **attrs, struct seg6_local_lwt *slwt,
2130			     struct netlink_ext_ack *extack)
2131{
2132	struct seg6_flavors_info *finfo = &slwt->flv_info;
2133	struct nlattr *tb[SEG6_LOCAL_FLV_MAX + 1];
2134	int action = slwt->action;
2135	__u32 fops, supp_fops;
2136	int rc;
2137
2138	rc = nla_parse_nested_deprecated(tb, SEG6_LOCAL_FLV_MAX,
2139					 attrs[SEG6_LOCAL_FLAVORS],
2140					 seg6_local_flavors_policy, NULL);
2141	if (rc < 0)
2142		return rc;
2143
2144	/* this attribute MUST always be present since it represents the Flavor
2145	 * operation(s) to be carried out.
2146	 */
2147	if (!tb[SEG6_LOCAL_FLV_OPERATION])
2148		return -EINVAL;
2149
2150	fops = nla_get_u32(tb[SEG6_LOCAL_FLV_OPERATION]);
2151	rc = seg6_flv_supp_ops_by_action(action, &supp_fops);
2152	if (rc < 0 || (fops & ~supp_fops)) {
2153		NL_SET_ERR_MSG(extack, "Unsupported Flavor operation(s)");
2154		return -EOPNOTSUPP;
2155	}
2156
2157	finfo->flv_ops = fops;
2158
2159	if (seg6_next_csid_enabled(fops)) {
2160		/* Locator-Block and Locator-Node Function lengths can be
2161		 * provided by the user space. Otherwise, default values are
2162		 * applied.
2163		 */
2164		rc = seg6_parse_nla_next_csid_cfg(tb, finfo, extack);
2165		if (rc < 0)
2166			return rc;
2167	}
2168
2169	return 0;
2170}
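
/* Illustrative only: a NEXT-C-SID enabled behavior that exercises
 * parse_nla_flavors() can be set up with a recent iproute2 using something
 * like (exact syntax may vary across versions):
 *
 *   ip -6 route add fc00:0:1::/48 encap seg6local action End \
 *        flavors next-csid lblen 32 nflen 16 dev eth0
 *
 * lblen/nflen correspond to SEG6_LOCAL_FLV_LCBLOCK_BITS and
 * SEG6_LOCAL_FLV_LCNODE_FN_BITS; when omitted, the compile-time defaults are
 * used.
 */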
2171
2172static int seg6_fill_nla_next_csid_cfg(struct sk_buff *skb,
2173				       struct seg6_flavors_info *finfo)
2174{
2175	if (nla_put_u8(skb, SEG6_LOCAL_FLV_LCBLOCK_BITS, finfo->lcblock_bits))
2176		return -EMSGSIZE;
2177
2178	if (nla_put_u8(skb, SEG6_LOCAL_FLV_LCNODE_FN_BITS,
2179		       finfo->lcnode_func_bits))
2180		return -EMSGSIZE;
2181
2182	return 0;
2183}
2184
2185static int put_nla_flavors(struct sk_buff *skb, struct seg6_local_lwt *slwt)
2186{
2187	struct seg6_flavors_info *finfo = &slwt->flv_info;
2188	__u32 fops = finfo->flv_ops;
2189	struct nlattr *nest;
2190	int rc;
2191
2192	nest = nla_nest_start(skb, SEG6_LOCAL_FLAVORS);
2193	if (!nest)
2194		return -EMSGSIZE;
2195
2196	if (nla_put_u32(skb, SEG6_LOCAL_FLV_OPERATION, fops)) {
2197		rc = -EMSGSIZE;
2198		goto err;
2199	}
2200
2201	if (seg6_next_csid_enabled(fops)) {
2202		rc = seg6_fill_nla_next_csid_cfg(skb, finfo);
2203		if (rc < 0)
2204			goto err;
2205	}
2206
2207	return nla_nest_end(skb, nest);
2208
2209err:
2210	nla_nest_cancel(skb, nest);
2211	return rc;
2212}
2213
2214static int seg6_cmp_nla_next_csid_cfg(struct seg6_flavors_info *finfo_a,
2215				      struct seg6_flavors_info *finfo_b)
2216{
2217	if (finfo_a->lcblock_bits != finfo_b->lcblock_bits)
2218		return 1;
2219
2220	if (finfo_a->lcnode_func_bits != finfo_b->lcnode_func_bits)
2221		return 1;
2222
2223	return 0;
2224}
2225
2226static int cmp_nla_flavors(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
2227{
2228	struct seg6_flavors_info *finfo_a = &a->flv_info;
2229	struct seg6_flavors_info *finfo_b = &b->flv_info;
2230
2231	if (finfo_a->flv_ops != finfo_b->flv_ops)
2232		return 1;
2233
2234	if (seg6_next_csid_enabled(finfo_a->flv_ops)) {
2235		if (seg6_cmp_nla_next_csid_cfg(finfo_a, finfo_b))
2236			return 1;
2237	}
2238
2239	return 0;
2240}
2241
2242static int encap_size_flavors(struct seg6_local_lwt *slwt)
2243{
2244	struct seg6_flavors_info *finfo = &slwt->flv_info;
2245	int nlsize;
2246
2247	nlsize = nla_total_size(0) +	/* nest SEG6_LOCAL_FLAVORS */
2248		 nla_total_size(4);	/* SEG6_LOCAL_FLV_OPERATION */
2249
2250	if (seg6_next_csid_enabled(finfo->flv_ops))
2251		nlsize += nla_total_size(1) + /* SEG6_LOCAL_FLV_LCBLOCK_BITS */
2252			  nla_total_size(1); /* SEG6_LOCAL_FLV_LCNODE_FN_BITS */
2253
2254	return nlsize;
2255}
2256
2257struct seg6_action_param {
2258	int (*parse)(struct nlattr **attrs, struct seg6_local_lwt *slwt,
2259		     struct netlink_ext_ack *extack);
2260	int (*put)(struct sk_buff *skb, struct seg6_local_lwt *slwt);
2261	int (*cmp)(struct seg6_local_lwt *a, struct seg6_local_lwt *b);
2262
2263	/* optional destroy() callback useful for releasing resources which
2264	 * have been previously acquired in the corresponding parse()
2265	 * function.
2266	 */
2267	void (*destroy)(struct seg6_local_lwt *slwt);
2268};
2269
2270static struct seg6_action_param seg6_action_params[SEG6_LOCAL_MAX + 1] = {
2271	[SEG6_LOCAL_SRH]	= { .parse = parse_nla_srh,
2272				    .put = put_nla_srh,
2273				    .cmp = cmp_nla_srh,
2274				    .destroy = destroy_attr_srh },
2275
2276	[SEG6_LOCAL_TABLE]	= { .parse = parse_nla_table,
2277				    .put = put_nla_table,
2278				    .cmp = cmp_nla_table },
2279
2280	[SEG6_LOCAL_NH4]	= { .parse = parse_nla_nh4,
2281				    .put = put_nla_nh4,
2282				    .cmp = cmp_nla_nh4 },
2283
2284	[SEG6_LOCAL_NH6]	= { .parse = parse_nla_nh6,
2285				    .put = put_nla_nh6,
2286				    .cmp = cmp_nla_nh6 },
2287
2288	[SEG6_LOCAL_IIF]	= { .parse = parse_nla_iif,
2289				    .put = put_nla_iif,
2290				    .cmp = cmp_nla_iif },
2291
2292	[SEG6_LOCAL_OIF]	= { .parse = parse_nla_oif,
2293				    .put = put_nla_oif,
2294				    .cmp = cmp_nla_oif },
2295
2296	[SEG6_LOCAL_BPF]	= { .parse = parse_nla_bpf,
2297				    .put = put_nla_bpf,
2298				    .cmp = cmp_nla_bpf,
2299				    .destroy = destroy_attr_bpf },
2300
2301	[SEG6_LOCAL_VRFTABLE]	= { .parse = parse_nla_vrftable,
2302				    .put = put_nla_vrftable,
2303				    .cmp = cmp_nla_vrftable },
2304
2305	[SEG6_LOCAL_COUNTERS]	= { .parse = parse_nla_counters,
2306				    .put = put_nla_counters,
2307				    .cmp = cmp_nla_counters,
2308				    .destroy = destroy_attr_counters },
2309
2310	[SEG6_LOCAL_FLAVORS]	= { .parse = parse_nla_flavors,
2311				    .put = put_nla_flavors,
2312				    .cmp = cmp_nla_flavors },
2313};
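
/* Wiring up a new attribute boils down to: defining its SEG6_LOCAL_* id in
 * the UAPI header, providing parse()/put()/cmp() callbacks (plus destroy()
 * if parse() acquires resources) and registering them in the table above.
 * The attribute is then referenced by the relevant entries of the
 * seg6_action_table through their attrs/optattrs bitmasks.
 */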
2314
2315/* call the destroy() callback (if available) for each set attribute in
2316 * @parsed_attrs, starting from the first attribute up to the @max_parsed
2317 * (excluded) attribute.
2318 */
2319static void __destroy_attrs(unsigned long parsed_attrs, int max_parsed,
2320			    struct seg6_local_lwt *slwt)
2321{
2322	struct seg6_action_param *param;
2323	int i;
2324
	/* Every seg6local attribute is identified by an ID which is encoded
	 * as a flag (i.e. 1 << ID) in the 'parsed_attrs' bitmask.
	 *
	 * We scan the 'parsed_attrs' bitmask, starting from the first attribute
	 * up to the @max_parsed (excluded) attribute.
	 * For each set attribute, we retrieve the corresponding destroy()
	 * callback. If the callback is not available, then we skip to the next
	 * attribute; otherwise, we call the destroy() callback.
	 */
2334	for (i = SEG6_LOCAL_SRH; i < max_parsed; ++i) {
2335		if (!(parsed_attrs & SEG6_F_ATTR(i)))
2336			continue;
2337
2338		param = &seg6_action_params[i];
2339
2340		if (param->destroy)
2341			param->destroy(slwt);
2342	}
2343}
2344
2345/* release all the resources that may have been acquired during parsing
2346 * operations.
2347 */
2348static void destroy_attrs(struct seg6_local_lwt *slwt)
2349{
2350	unsigned long attrs = slwt->desc->attrs | slwt->parsed_optattrs;
2351
2352	__destroy_attrs(attrs, SEG6_LOCAL_MAX + 1, slwt);
2353}
2354
2355static int parse_nla_optional_attrs(struct nlattr **attrs,
2356				    struct seg6_local_lwt *slwt,
2357				    struct netlink_ext_ack *extack)
2358{
2359	struct seg6_action_desc *desc = slwt->desc;
2360	unsigned long parsed_optattrs = 0;
2361	struct seg6_action_param *param;
2362	int err, i;
2363
2364	for (i = SEG6_LOCAL_SRH; i < SEG6_LOCAL_MAX + 1; ++i) {
2365		if (!(desc->optattrs & SEG6_F_ATTR(i)) || !attrs[i])
2366			continue;
2367
		/* once here, the i-th attribute is provided by the
		 * userspace AND it is also flagged as optional in the
		 * behavior descriptor.
		 */
2371		param = &seg6_action_params[i];
2372
2373		err = param->parse(attrs, slwt, extack);
2374		if (err < 0)
2375			goto parse_optattrs_err;
2376
2377		/* current attribute has been correctly parsed */
2378		parsed_optattrs |= SEG6_F_ATTR(i);
2379	}
2380
	/* store in the tunnel state all the optional attributes that have
	 * been successfully parsed.
	 */
2384	slwt->parsed_optattrs = parsed_optattrs;
2385
2386	return 0;
2387
2388parse_optattrs_err:
2389	__destroy_attrs(parsed_optattrs, i, slwt);
2390
2391	return err;
2392}
2393
/* call the custom constructor of the behavior during its initialization phase,
 * after all of its attributes have been parsed successfully.
 */
2397static int
2398seg6_local_lwtunnel_build_state(struct seg6_local_lwt *slwt, const void *cfg,
2399				struct netlink_ext_ack *extack)
2400{
2401	struct seg6_action_desc *desc = slwt->desc;
2402	struct seg6_local_lwtunnel_ops *ops;
2403
2404	ops = &desc->slwt_ops;
2405	if (!ops->build_state)
2406		return 0;
2407
2408	return ops->build_state(slwt, cfg, extack);
2409}
2410
/* call the custom destructor of the behavior; it is invoked just before the
 * tunnel is destroyed.
 */
2414static void seg6_local_lwtunnel_destroy_state(struct seg6_local_lwt *slwt)
2415{
2416	struct seg6_action_desc *desc = slwt->desc;
2417	struct seg6_local_lwtunnel_ops *ops;
2418
2419	ops = &desc->slwt_ops;
2420	if (!ops->destroy_state)
2421		return;
2422
2423	ops->destroy_state(slwt);
2424}
2425
2426static int parse_nla_action(struct nlattr **attrs, struct seg6_local_lwt *slwt,
2427			    struct netlink_ext_ack *extack)
2428{
2429	struct seg6_action_param *param;
2430	struct seg6_action_desc *desc;
2431	unsigned long invalid_attrs;
2432	int i, err;
2433
2434	desc = __get_action_desc(slwt->action);
2435	if (!desc)
2436		return -EINVAL;
2437
2438	if (!desc->input)
2439		return -EOPNOTSUPP;
2440
2441	slwt->desc = desc;
2442	slwt->headroom += desc->static_headroom;
2443
	/* Forcing the desc->optattrs *set* and the desc->attrs *set* to be
	 * disjoint allows us to release the resources acquired by optional
	 * attributes and the resources acquired by required attributes
	 * independently of each other, without any interference.
	 * In other terms, we are sure that the same acquired resource is
	 * never released twice.
	 *
	 * Note that if an attribute is configured both as required and as
	 * optional, it means that the user has messed something up in the
	 * seg6_action_table. Therefore, this check is required for SRv6
	 * behaviors to work properly.
	 */
2456	invalid_attrs = desc->attrs & desc->optattrs;
2457	if (invalid_attrs) {
2458		WARN_ONCE(1,
2459			  "An attribute cannot be both required AND optional");
2460		return -EINVAL;
2461	}
2462
2463	/* parse the required attributes */
2464	for (i = SEG6_LOCAL_SRH; i < SEG6_LOCAL_MAX + 1; i++) {
2465		if (desc->attrs & SEG6_F_ATTR(i)) {
2466			if (!attrs[i])
2467				return -EINVAL;
2468
2469			param = &seg6_action_params[i];
2470
2471			err = param->parse(attrs, slwt, extack);
2472			if (err < 0)
2473				goto parse_attrs_err;
2474		}
2475	}
2476
2477	/* parse the optional attributes, if any */
2478	err = parse_nla_optional_attrs(attrs, slwt, extack);
2479	if (err < 0)
2480		goto parse_attrs_err;
2481
2482	return 0;
2483
2484parse_attrs_err:
	/* release any resource that may have been acquired during the
	 * previous (i - 1) parse() operations.
	 */
2488	__destroy_attrs(desc->attrs, i, slwt);
2489
2490	return err;
2491}
2492
2493static int seg6_local_build_state(struct net *net, struct nlattr *nla,
2494				  unsigned int family, const void *cfg,
2495				  struct lwtunnel_state **ts,
2496				  struct netlink_ext_ack *extack)
2497{
2498	struct nlattr *tb[SEG6_LOCAL_MAX + 1];
2499	struct lwtunnel_state *newts;
2500	struct seg6_local_lwt *slwt;
2501	int err;
2502
2503	if (family != AF_INET6)
2504		return -EINVAL;
2505
2506	err = nla_parse_nested_deprecated(tb, SEG6_LOCAL_MAX, nla,
2507					  seg6_local_policy, extack);
2508
2509	if (err < 0)
2510		return err;
2511
2512	if (!tb[SEG6_LOCAL_ACTION])
2513		return -EINVAL;
2514
2515	newts = lwtunnel_state_alloc(sizeof(*slwt));
2516	if (!newts)
2517		return -ENOMEM;
2518
2519	slwt = seg6_local_lwtunnel(newts);
2520	slwt->action = nla_get_u32(tb[SEG6_LOCAL_ACTION]);
2521
2522	err = parse_nla_action(tb, slwt, extack);
2523	if (err < 0)
2524		goto out_free;
2525
2526	err = seg6_local_lwtunnel_build_state(slwt, cfg, extack);
2527	if (err < 0)
2528		goto out_destroy_attrs;
2529
2530	newts->type = LWTUNNEL_ENCAP_SEG6_LOCAL;
2531	newts->flags = LWTUNNEL_STATE_INPUT_REDIRECT;
2532	newts->headroom = slwt->headroom;
2533
2534	*ts = newts;
2535
2536	return 0;
2537
2538out_destroy_attrs:
2539	destroy_attrs(slwt);
2540out_free:
2541	kfree(newts);
2542	return err;
2543}
2544
2545static void seg6_local_destroy_state(struct lwtunnel_state *lwt)
2546{
2547	struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt);
2548
2549	seg6_local_lwtunnel_destroy_state(slwt);
2550
	destroy_attrs(slwt);
2554}
2555
2556static int seg6_local_fill_encap(struct sk_buff *skb,
2557				 struct lwtunnel_state *lwt)
2558{
2559	struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt);
2560	struct seg6_action_param *param;
2561	unsigned long attrs;
2562	int i, err;
2563
2564	if (nla_put_u32(skb, SEG6_LOCAL_ACTION, slwt->action))
2565		return -EMSGSIZE;
2566
2567	attrs = slwt->desc->attrs | slwt->parsed_optattrs;
2568
2569	for (i = SEG6_LOCAL_SRH; i < SEG6_LOCAL_MAX + 1; i++) {
2570		if (attrs & SEG6_F_ATTR(i)) {
2571			param = &seg6_action_params[i];
2572			err = param->put(skb, slwt);
2573			if (err < 0)
2574				return err;
2575		}
2576	}
2577
2578	return 0;
2579}
2580
2581static int seg6_local_get_encap_size(struct lwtunnel_state *lwt)
2582{
2583	struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt);
2584	unsigned long attrs;
2585	int nlsize;
2586
2587	nlsize = nla_total_size(4); /* action */
2588
2589	attrs = slwt->desc->attrs | slwt->parsed_optattrs;
2590
2591	if (attrs & SEG6_F_ATTR(SEG6_LOCAL_SRH))
2592		nlsize += nla_total_size((slwt->srh->hdrlen + 1) << 3);
2593
2594	if (attrs & SEG6_F_ATTR(SEG6_LOCAL_TABLE))
2595		nlsize += nla_total_size(4);
2596
2597	if (attrs & SEG6_F_ATTR(SEG6_LOCAL_NH4))
2598		nlsize += nla_total_size(4);
2599
2600	if (attrs & SEG6_F_ATTR(SEG6_LOCAL_NH6))
2601		nlsize += nla_total_size(16);
2602
2603	if (attrs & SEG6_F_ATTR(SEG6_LOCAL_IIF))
2604		nlsize += nla_total_size(4);
2605
2606	if (attrs & SEG6_F_ATTR(SEG6_LOCAL_OIF))
2607		nlsize += nla_total_size(4);
2608
2609	if (attrs & SEG6_F_ATTR(SEG6_LOCAL_BPF))
2610		nlsize += nla_total_size(sizeof(struct nlattr)) +
2611		       nla_total_size(MAX_PROG_NAME) +
2612		       nla_total_size(4);
2613
2614	if (attrs & SEG6_F_ATTR(SEG6_LOCAL_VRFTABLE))
2615		nlsize += nla_total_size(4);
2616
2617	if (attrs & SEG6_F_LOCAL_COUNTERS)
2618		nlsize += nla_total_size(0) + /* nest SEG6_LOCAL_COUNTERS */
2619			  /* SEG6_LOCAL_CNT_PACKETS */
2620			  nla_total_size_64bit(sizeof(__u64)) +
2621			  /* SEG6_LOCAL_CNT_BYTES */
2622			  nla_total_size_64bit(sizeof(__u64)) +
2623			  /* SEG6_LOCAL_CNT_ERRORS */
2624			  nla_total_size_64bit(sizeof(__u64));
2625
2626	if (attrs & SEG6_F_ATTR(SEG6_LOCAL_FLAVORS))
2627		nlsize += encap_size_flavors(slwt);
2628
2629	return nlsize;
2630}
2631
2632static int seg6_local_cmp_encap(struct lwtunnel_state *a,
2633				struct lwtunnel_state *b)
2634{
2635	struct seg6_local_lwt *slwt_a, *slwt_b;
2636	struct seg6_action_param *param;
2637	unsigned long attrs_a, attrs_b;
2638	int i;
2639
2640	slwt_a = seg6_local_lwtunnel(a);
2641	slwt_b = seg6_local_lwtunnel(b);
2642
2643	if (slwt_a->action != slwt_b->action)
2644		return 1;
2645
2646	attrs_a = slwt_a->desc->attrs | slwt_a->parsed_optattrs;
2647	attrs_b = slwt_b->desc->attrs | slwt_b->parsed_optattrs;
2648
2649	if (attrs_a != attrs_b)
2650		return 1;
2651
2652	for (i = SEG6_LOCAL_SRH; i < SEG6_LOCAL_MAX + 1; i++) {
2653		if (attrs_a & SEG6_F_ATTR(i)) {
2654			param = &seg6_action_params[i];
2655			if (param->cmp(slwt_a, slwt_b))
2656				return 1;
2657		}
2658	}
2659
2660	return 0;
2661}
2662
2663static const struct lwtunnel_encap_ops seg6_local_ops = {
2664	.build_state	= seg6_local_build_state,
2665	.destroy_state	= seg6_local_destroy_state,
2666	.input		= seg6_local_input,
2667	.fill_encap	= seg6_local_fill_encap,
2668	.get_encap_size	= seg6_local_get_encap_size,
2669	.cmp_encap	= seg6_local_cmp_encap,
2670	.owner		= THIS_MODULE,
2671};
2672
2673int __init seg6_local_init(void)
2674{
	/* If the max total number of defined attributes is reached, then your
	 * kernel build stops here.
	 *
	 * This check is required to avoid arithmetic overflows when processing
	 * behavior attributes: attribute IDs are used as bit positions in an
	 * unsigned long bitmask, so the number of defined attributes must not
	 * exceed BITS_PER_TYPE(unsigned long).
	 */
2682	BUILD_BUG_ON(SEG6_LOCAL_MAX + 1 > BITS_PER_TYPE(unsigned long));
2683
2684	/* Check whether the number of defined flavors exceeds the maximum
2685	 * allowed value.
2686	 */
2687	BUILD_BUG_ON(SEG6_LOCAL_FLV_OP_MAX + 1 > BITS_PER_TYPE(__u32));
2688
	/* If the default NEXT-C-SID Locator-Block/Node Function lengths (in
	 * bits) have been changed to invalid values, the kernel build stops
	 * here.
	 */
2693	BUILD_BUG_ON(next_csid_chk_cntr_bits(SEG6_LOCAL_LCBLOCK_DBITS,
2694					     SEG6_LOCAL_LCNODE_FN_DBITS));
2695	BUILD_BUG_ON(next_csid_chk_lcblock_bits(SEG6_LOCAL_LCBLOCK_DBITS));
2696	BUILD_BUG_ON(next_csid_chk_lcnode_fn_bits(SEG6_LOCAL_LCNODE_FN_DBITS));
2697
2698	/* To be memory efficient, we use 'u8' to represent the different
2699	 * actions related to RFC8986 flavors. If the kernel build stops here,
2700	 * it means that it is not possible to correctly encode these actions
2701	 * with the data type chosen for the action table.
2702	 */
2703	BUILD_BUG_ON(SEG6_LOCAL_FLV_ACT_MAX > (typeof(flv8986_act_tbl[0]))~0U);
2704
2705	return lwtunnel_encap_add_ops(&seg6_local_ops,
2706				      LWTUNNEL_ENCAP_SEG6_LOCAL);
2707}
2708
2709void seg6_local_exit(void)
2710{
2711	lwtunnel_encap_del_ops(&seg6_local_ops, LWTUNNEL_ENCAP_SEG6_LOCAL);
2712}
2713