// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>

#include <linux/tc_act/tc_vlan.h>
#include <net/tc_act/tc_vlan.h>

static struct tc_action_ops act_vlan_ops;

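/* Datapath entry point: applies the configured VLAN operation (push, pop,
 * modify, or Ethernet header push/pop) to the skb and returns the configured
 * control action, or TC_ACT_SHOT if the packet had to be dropped. Runs in
 * softirq context; the parameter block is read via rcu_dereference_bh() so it
 * can be replaced by tcf_vlan_init() without stopping traffic.
 */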
TC_INDIRECT_SCOPE int tcf_vlan_act(struct sk_buff *skb,
				   const struct tc_action *a,
				   struct tcf_result *res)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	int action;
	int err;
	u16 tci;

	tcf_lastuse_update(&v->tcf_tm);
	tcf_action_update_bstats(&v->common, skb);

	/* Ensure 'data' points at mac_header prior to calling VLAN
	 * manipulating functions.
	 */
	if (skb_at_tc_ingress(skb))
		skb_push_rcsum(skb, skb->mac_len);

	action = READ_ONCE(v->tcf_action);

	p = rcu_dereference_bh(v->vlan_p);

	switch (p->tcfv_action) {
	case TCA_VLAN_ACT_POP:
		err = skb_vlan_pop(skb);
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_PUSH:
		err = skb_vlan_push(skb, p->tcfv_push_proto, p->tcfv_push_vid |
				    (p->tcfv_push_prio << VLAN_PRIO_SHIFT));
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_MODIFY:
		/* No-op if no vlan tag (either hw-accel or in-payload) */
		if (!skb_vlan_tagged(skb))
			goto out;
		/* extract existing tag (and guarantee no hw-accel tag) */
		if (skb_vlan_tag_present(skb)) {
			tci = skb_vlan_tag_get(skb);
			__vlan_hwaccel_clear_tag(skb);
		} else {
			/* in-payload vlan tag, pop it */
			err = __skb_vlan_pop(skb, &tci);
			if (err)
				goto drop;
		}
		/* replace the vid */
		tci = (tci & ~VLAN_VID_MASK) | p->tcfv_push_vid;
		/* replace prio bits, if tcfv_push_prio specified */
		if (p->tcfv_push_prio_exists) {
			tci &= ~VLAN_PRIO_MASK;
			tci |= p->tcfv_push_prio << VLAN_PRIO_SHIFT;
		}
		/* put updated tci as hwaccel tag */
		__vlan_hwaccel_put_tag(skb, p->tcfv_push_proto, tci);
		break;
	case TCA_VLAN_ACT_POP_ETH:
		err = skb_eth_pop(skb);
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_PUSH_ETH:
		err = skb_eth_push(skb, p->tcfv_push_dst, p->tcfv_push_src);
		if (err)
			goto drop;
		break;
	default:
		BUG();
	}

out:
	if (skb_at_tc_ingress(skb))
		skb_pull_rcsum(skb, skb->mac_len);

	return action;

drop:
	tcf_action_inc_drop_qstats(&v->common);
	return TC_ACT_SHOT;
}

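/* Netlink attribute policy. Attributes from TCA_VLAN_PUSH_ETH_DST onwards are
 * validated strictly (see strict_start_type); older attributes keep the
 * deprecated, lenient parsing behaviour for backwards compatibility.
 */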
static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
	[TCA_VLAN_UNSPEC]		= { .strict_start_type = TCA_VLAN_PUSH_ETH_DST },
	[TCA_VLAN_PARMS]		= { .len = sizeof(struct tc_vlan) },
	[TCA_VLAN_PUSH_VLAN_ID]		= { .type = NLA_U16 },
	[TCA_VLAN_PUSH_VLAN_PROTOCOL]	= { .type = NLA_U16 },
	[TCA_VLAN_PUSH_VLAN_PRIORITY]	= { .type = NLA_U8 },
	[TCA_VLAN_PUSH_ETH_DST]		= NLA_POLICY_ETH_ADDR,
	[TCA_VLAN_PUSH_ETH_SRC]		= NLA_POLICY_ETH_ADDR,
};

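/* Create or update a vlan action instance from netlink attributes. Validates
 * the per-mode attributes (VLAN id/protocol/priority for push/modify, MAC
 * addresses for push_eth), allocates a new parameter block and swaps it in
 * under tcf_lock; the old block is freed after an RCU grace period.
 */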
static int tcf_vlan_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_vlan_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_VLAN_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool push_prio_exists = false;
	struct tcf_vlan_params *p;
	struct tc_vlan *parm;
	struct tcf_vlan *v;
	int action;
	u16 push_vid = 0;
	__be16 push_proto = 0;
	u8 push_prio = 0;
	bool exists = false;
	int ret = 0, err;
	u32 index;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_VLAN_MAX, nla, vlan_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_VLAN_PARMS])
		return -EINVAL;
	parm = nla_data(tb[TCA_VLAN_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return ACT_P_BOUND;

	switch (parm->v_action) {
	case TCA_VLAN_ACT_POP:
		break;
	case TCA_VLAN_ACT_PUSH:
	case TCA_VLAN_ACT_MODIFY:
		if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, index);
			return -EINVAL;
		}
		push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
		if (push_vid >= VLAN_VID_MASK) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, index);
			return -ERANGE;
		}

		if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
			push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
			switch (push_proto) {
			case htons(ETH_P_8021Q):
			case htons(ETH_P_8021AD):
				break;
			default:
				if (exists)
					tcf_idr_release(*a, bind);
				else
					tcf_idr_cleanup(tn, index);
				return -EPROTONOSUPPORT;
			}
		} else {
			push_proto = htons(ETH_P_8021Q);
		}

		push_prio_exists = !!tb[TCA_VLAN_PUSH_VLAN_PRIORITY];
		if (push_prio_exists)
			push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
		break;
	case TCA_VLAN_ACT_POP_ETH:
		break;
	case TCA_VLAN_ACT_PUSH_ETH:
		if (!tb[TCA_VLAN_PUSH_ETH_DST] || !tb[TCA_VLAN_PUSH_ETH_SRC]) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, index);
			return -EINVAL;
		}
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}
	action = parm->v_action;

	if (!exists) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_vlan_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	v = to_vlan(*a);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		err = -ENOMEM;
		goto put_chain;
	}

	p->tcfv_action = action;
	p->tcfv_push_vid = push_vid;
	p->tcfv_push_prio = push_prio;
	p->tcfv_push_prio_exists = push_prio_exists || action == TCA_VLAN_ACT_PUSH;
	p->tcfv_push_proto = push_proto;

	if (action == TCA_VLAN_ACT_PUSH_ETH) {
		nla_memcpy(&p->tcfv_push_dst, tb[TCA_VLAN_PUSH_ETH_DST],
			   ETH_ALEN);
		nla_memcpy(&p->tcfv_push_src, tb[TCA_VLAN_PUSH_ETH_SRC],
			   ETH_ALEN);
	}

	spin_lock_bh(&v->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	p = rcu_replace_pointer(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
	spin_unlock_bh(&v->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (p)
		kfree_rcu(p, rcu);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_vlan_cleanup(struct tc_action *a)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;

	p = rcu_dereference_protected(v->vlan_p, 1);
	if (p)
		kfree_rcu(p, rcu);
}

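/* Dump the action configuration back to user space. tcf_lock is held so the
 * parameter block cannot be swapped out while its fields are copied into the
 * netlink message.
 */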
static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	struct tc_vlan opt = {
		.index    = v->tcf_index,
		.refcnt   = refcount_read(&v->tcf_refcnt) - ref,
		.bindcnt  = atomic_read(&v->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&v->tcf_lock);
	opt.action = v->tcf_action;
	p = rcu_dereference_protected(v->vlan_p, lockdep_is_held(&v->tcf_lock));
	opt.v_action = p->tcfv_action;
	if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((p->tcfv_action == TCA_VLAN_ACT_PUSH ||
	     p->tcfv_action == TCA_VLAN_ACT_MODIFY) &&
	    (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, p->tcfv_push_vid) ||
	     nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
			  p->tcfv_push_proto) ||
	     (p->tcfv_push_prio_exists &&
	      nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY, p->tcfv_push_prio))))
		goto nla_put_failure;

	if (p->tcfv_action == TCA_VLAN_ACT_PUSH_ETH) {
		if (nla_put(skb, TCA_VLAN_PUSH_ETH_DST, ETH_ALEN,
			    p->tcfv_push_dst))
			goto nla_put_failure;
		if (nla_put(skb, TCA_VLAN_PUSH_ETH_SRC, ETH_ALEN,
			    p->tcfv_push_src))
			goto nla_put_failure;
	}

	tcf_tm_dump(&t, &v->tcf_tm);
	if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&v->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&v->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u64 packets,
				  u64 drops, u64 lastuse, bool hw)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_t *tm = &v->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_vlan))
		+ nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_ID */
		+ nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_PROTOCOL */
		+ nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
}

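/* Translate the software action into a flow_action entry for hardware
 * offload. With bind == true the full entry (id plus VLAN fields) is filled
 * in; otherwise only the action id is reported so drivers can check support.
 */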
static int tcf_vlan_offload_act_setup(struct tc_action *act, void *entry_data,
				      u32 *index_inc, bool bind,
				      struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		switch (tcf_vlan_action(act)) {
		case TCA_VLAN_ACT_PUSH:
			entry->id = FLOW_ACTION_VLAN_PUSH;
			entry->vlan.vid = tcf_vlan_push_vid(act);
			entry->vlan.proto = tcf_vlan_push_proto(act);
			entry->vlan.prio = tcf_vlan_push_prio(act);
			break;
		case TCA_VLAN_ACT_POP:
			entry->id = FLOW_ACTION_VLAN_POP;
			break;
		case TCA_VLAN_ACT_MODIFY:
			entry->id = FLOW_ACTION_VLAN_MANGLE;
			entry->vlan.vid = tcf_vlan_push_vid(act);
			entry->vlan.proto = tcf_vlan_push_proto(act);
			entry->vlan.prio = tcf_vlan_push_prio(act);
			break;
		case TCA_VLAN_ACT_POP_ETH:
			entry->id = FLOW_ACTION_VLAN_POP_ETH;
			break;
		case TCA_VLAN_ACT_PUSH_ETH:
			entry->id = FLOW_ACTION_VLAN_PUSH_ETH;
			tcf_vlan_push_eth(entry->vlan_push_eth.src, entry->vlan_push_eth.dst, act);
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported vlan action mode offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		switch (tcf_vlan_action(act)) {
		case TCA_VLAN_ACT_PUSH:
			fl_action->id = FLOW_ACTION_VLAN_PUSH;
			break;
		case TCA_VLAN_ACT_POP:
			fl_action->id = FLOW_ACTION_VLAN_POP;
			break;
		case TCA_VLAN_ACT_MODIFY:
			fl_action->id = FLOW_ACTION_VLAN_MANGLE;
			break;
		case TCA_VLAN_ACT_POP_ETH:
			fl_action->id = FLOW_ACTION_VLAN_POP_ETH;
			break;
		case TCA_VLAN_ACT_PUSH_ETH:
			fl_action->id = FLOW_ACTION_VLAN_PUSH_ETH;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

static struct tc_action_ops act_vlan_ops = {
	.kind		=	"vlan",
	.id		=	TCA_ID_VLAN,
	.owner		=	THIS_MODULE,
	.act		=	tcf_vlan_act,
	.dump		=	tcf_vlan_dump,
	.init		=	tcf_vlan_init,
	.cleanup	=	tcf_vlan_cleanup,
	.stats_update	=	tcf_vlan_stats_update,
	.get_fill_size	=	tcf_vlan_get_fill_size,
	.offload_act_setup =	tcf_vlan_offload_act_setup,
	.size		=	sizeof(struct tcf_vlan),
};
MODULE_ALIAS_NET_ACT("vlan");

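/* Per-network-namespace init/exit for the vlan action table. */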
static __net_init int vlan_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_vlan_ops.net_id);

	return tc_action_net_init(net, tn, &act_vlan_ops);
}

static void __net_exit vlan_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_vlan_ops.net_id);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit_batch = vlan_exit_net,
	.id   = &act_vlan_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init vlan_init_module(void)
{
	return tcf_register_action(&act_vlan_ops, &vlan_net_ops);
}

static void __exit vlan_cleanup_module(void)
{
	tcf_unregister_action(&act_vlan_ops, &vlan_net_ops);
}

module_init(vlan_init_module);
module_exit(vlan_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("vlan manipulation actions");
MODULE_LICENSE("GPL v2");