// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright 2020 NXP */
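
/* Gate action: pass or drop packets according to a repeating,
 * time-driven schedule of gate open/close intervals, in the spirit of
 * the IEEE 802.1Qci (PSFP) stream gate.
 *
 * Illustrative configuration (assuming a recent enough iproute2):
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: protocol ip flower \
 *		action gate index 2 clockid CLOCK_TAI \
 *		sched-entry open  200000000 -1 8000000 \
 *		sched-entry close 100000000 -1 -1
 *
 * Each sched-entry is <state> <interval ns> <internal priority>
 * <max octets>, with -1 meaning "unspecified"/"unlimited".
 */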

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gate.h>
#include <net/tc_wrapper.h>

static struct tc_action_ops act_gate_ops;

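/* Return the current time in the schedule's reference clock.
 * TK_OFFS_MAX is used as a sentinel for CLOCK_MONOTONIC, where
 * ktime_get() already provides the right value and no offset
 * conversion is needed.
 */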
static ktime_t gate_get_time(struct tcf_gate *gact)
{
	ktime_t mono = ktime_get();

	switch (gact->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, gact->tk_offset);
	}

	return KTIME_MAX;
}

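/* Compute the first cycle start that is not in the past: either the
 * configured base time itself, or base time plus a whole number of
 * cycles, rounded up past 'now'.
 */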
static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
{
	struct tcf_gate_params *param = &gact->param;
	ktime_t now, base, cycle;
	u64 n;

	base = ns_to_ktime(param->tcfg_basetime);
	now = gate_get_time(gact);

	if (ktime_after(base, now)) {
		*start = base;
		return;
	}

	cycle = param->tcfg_cycletime;

	n = div64_u64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
}

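/* Arm the schedule timer, keeping whichever expiry is earlier: the
 * requested start or an already-pending expiration.
 */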
static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
{
	ktime_t expires;

	expires = hrtimer_get_expires(&gact->hitimer);
	if (expires == 0)
		expires = KTIME_MAX;

	start = min_t(ktime_t, start, expires);

	hrtimer_start(&gact->hitimer, start, HRTIMER_MODE_ABS_SOFT);
}

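/* hrtimer callback: advance the schedule to the next gate entry. Runs
 * in softirq context (HRTIMER_MODE_ABS_SOFT), so a plain spin_lock is
 * sufficient against the datapath. If processing fell behind by more
 * than one interval, resynchronize the close time to the current cycle
 * instead of replaying every missed entry.
 */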
static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
{
	struct tcf_gate *gact = container_of(timer, struct tcf_gate,
					     hitimer);
	struct tcf_gate_params *p = &gact->param;
	struct tcfg_gate_entry *next;
	ktime_t close_time, now;

	spin_lock(&gact->tcf_lock);

	next = gact->next_entry;

	/* cycle start, clear pending bit, clear total octets */
	gact->current_gate_status = next->gate_state ? GATE_ACT_GATE_OPEN : 0;
	gact->current_entry_octets = 0;
	gact->current_max_octets = next->maxoctets;

	gact->current_close_time = ktime_add_ns(gact->current_close_time,
						next->interval);

	close_time = gact->current_close_time;

	if (list_is_last(&next->list, &p->entries))
		next = list_first_entry(&p->entries,
					struct tcfg_gate_entry, list);
	else
		next = list_next_entry(next, list);

	now = gate_get_time(gact);

	if (ktime_after(now, close_time)) {
		ktime_t cycle, base;
		u64 n;

		cycle = p->tcfg_cycletime;
		base = ns_to_ktime(p->tcfg_basetime);
		n = div64_u64(ktime_sub_ns(now, base), cycle);
		close_time = ktime_add_ns(base, (n + 1) * cycle);
	}

	gact->next_entry = next;

	hrtimer_set_expires(&gact->hitimer, close_time);

	spin_unlock(&gact->tcf_lock);

	return HRTIMER_RESTART;
}

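/* Per-packet action handler. While the schedule has not started yet
 * (GATE_ACT_PENDING) packets pass through; once running, packets are
 * dropped when the gate is closed or when the current entry's octet
 * budget (maxoctets >= 0) is exhausted.
 */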
TC_INDIRECT_SCOPE int tcf_gate_act(struct sk_buff *skb,
				   const struct tc_action *a,
				   struct tcf_result *res)
{
	struct tcf_gate *gact = to_gate(a);
	int action = READ_ONCE(gact->tcf_action);

	tcf_lastuse_update(&gact->tcf_tm);
	tcf_action_update_bstats(&gact->common, skb);

	spin_lock(&gact->tcf_lock);
	if (unlikely(gact->current_gate_status & GATE_ACT_PENDING)) {
		spin_unlock(&gact->tcf_lock);
		return action;
	}

	if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN)) {
		spin_unlock(&gact->tcf_lock);
		goto drop;
	}

	if (gact->current_max_octets >= 0) {
		gact->current_entry_octets += qdisc_pkt_len(skb);
		if (gact->current_entry_octets > gact->current_max_octets) {
			spin_unlock(&gact->tcf_lock);
			goto overlimit;
		}
	}
	spin_unlock(&gact->tcf_lock);

	return action;

overlimit:
	tcf_action_inc_overlimit_qstats(&gact->common);
drop:
	tcf_action_inc_drop_qstats(&gact->common);
	return TC_ACT_SHOT;
}

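/* Netlink attribute policies: one for a single schedule entry, one for
 * the gate action's top-level attributes.
 */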
static const struct nla_policy entry_policy[TCA_GATE_ENTRY_MAX + 1] = {
	[TCA_GATE_ENTRY_INDEX]		= { .type = NLA_U32 },
	[TCA_GATE_ENTRY_GATE]		= { .type = NLA_FLAG },
	[TCA_GATE_ENTRY_INTERVAL]	= { .type = NLA_U32 },
	[TCA_GATE_ENTRY_IPV]		= { .type = NLA_S32 },
	[TCA_GATE_ENTRY_MAX_OCTETS]	= { .type = NLA_S32 },
};

static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = {
	[TCA_GATE_PARMS]		=
		NLA_POLICY_EXACT_LEN(sizeof(struct tc_gate)),
	[TCA_GATE_PRIORITY]		= { .type = NLA_S32 },
	[TCA_GATE_ENTRY_LIST]		= { .type = NLA_NESTED },
	[TCA_GATE_BASE_TIME]		= { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME]		= { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME_EXT]	= { .type = NLA_U64 },
	[TCA_GATE_FLAGS]		= { .type = NLA_U32 },
	[TCA_GATE_CLOCKID]		= { .type = NLA_S32 },
};

static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
			   struct netlink_ext_ack *extack)
{
	u32 interval = 0;

	entry->gate_state = nla_get_flag(tb[TCA_GATE_ENTRY_GATE]);

	if (tb[TCA_GATE_ENTRY_INTERVAL])
		interval = nla_get_u32(tb[TCA_GATE_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	if (tb[TCA_GATE_ENTRY_IPV])
		entry->ipv = nla_get_s32(tb[TCA_GATE_ENTRY_IPV]);
	else
		entry->ipv = -1;

	if (tb[TCA_GATE_ENTRY_MAX_OCTETS])
		entry->maxoctets = nla_get_s32(tb[TCA_GATE_ENTRY_MAX_OCTETS]);
	else
		entry->maxoctets = -1;

	return 0;
}

static int parse_gate_entry(struct nlattr *n, struct tcfg_gate_entry *entry,
			    int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GATE_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested(tb, TCA_GATE_ENTRY_MAX, n, entry_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_gate_entry(tb, entry, extack);
}

static void release_entry_list(struct list_head *entries)
{
	struct tcfg_gate_entry *entry, *e;

	list_for_each_entry_safe(entry, e, entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

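/* Parse the nested TCA_GATE_ENTRY_LIST attribute into sched->entries.
 * Returns the number of entries parsed or a negative errno; on failure
 * any entries already added are released.
 */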
static int parse_gate_list(struct nlattr *list_attr,
			   struct tcf_gate_params *sched,
			   struct netlink_ext_ack *extack)
{
	struct tcfg_gate_entry *entry;
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list_attr)
		return -EINVAL;

	nla_for_each_nested(n, list_attr, rem) {
		if (nla_type(n) != TCA_GATE_ONE_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute isn't type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			err = -ENOMEM;
			goto release_list;
		}

		err = parse_gate_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			goto release_list;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;

release_list:
	release_entry_list(&sched->entries);

	return err;
}

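/* (Re)initialize the schedule timer. On replace, the timer is only torn
 * down when the base time, clock id or offset actually changed;
 * tcf_lock is dropped around hrtimer_cancel() because the timer
 * callback takes the same lock.
 */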
static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
			     enum tk_offsets tko, s32 clockid,
			     bool do_init)
{
	if (!do_init) {
		if (basetime == gact->param.tcfg_basetime &&
		    tko == gact->tk_offset &&
		    clockid == gact->param.tcfg_clockid)
			return;

		spin_unlock_bh(&gact->tcf_lock);
		hrtimer_cancel(&gact->hitimer);
		spin_lock_bh(&gact->tcf_lock);
	}
	gact->param.tcfg_basetime = basetime;
	gact->param.tcfg_clockid = clockid;
	gact->tk_offset = tko;
	hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
	gact->hitimer.function = gate_timer_func;
}

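/* Create or replace a gate action instance: validate the clockid,
 * allocate or look up the action via the IDR, parse the schedule,
 * derive the cycle time from the entry intervals when none was given,
 * and start the schedule in the PENDING state until the first cycle
 * boundary.
 */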
static int tcf_gate_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);
	enum tk_offsets tk_offset = TK_OFFS_TAI;
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_GATE_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	u64 cycletime = 0, basetime = 0;
	struct tcf_gate_params *p;
	s32 clockid = CLOCK_TAI;
	struct tcf_gate *gact;
	struct tc_gate *parm;
	int ret = 0, err;
	u32 gflags = 0;
	s32 prio = -1;
	ktime_t start;
	u32 index;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GATE_MAX, nla, gate_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GATE_PARMS])
		return -EINVAL;

	if (tb[TCA_GATE_CLOCKID]) {
		clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
		switch (clockid) {
		case CLOCK_REALTIME:
			tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			return -EINVAL;
		}
	}

	parm = nla_data(tb[TCA_GATE_PARMS]);
	index = parm->index;

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (err && bind)
		return ACT_P_BOUND;

	if (!err) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_gate_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	if (tb[TCA_GATE_PRIORITY])
		prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);

	if (tb[TCA_GATE_BASE_TIME])
		basetime = nla_get_u64(tb[TCA_GATE_BASE_TIME]);

	if (tb[TCA_GATE_FLAGS])
		gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);

	gact = to_gate(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&gact->param.entries);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&gact->tcf_lock);
	p = &gact->param;

	if (tb[TCA_GATE_CYCLE_TIME])
		cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);

	if (tb[TCA_GATE_ENTRY_LIST]) {
		err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
		if (err < 0)
			goto chain_put;
	}

	if (!cycletime) {
		struct tcfg_gate_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &p->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		cycletime = cycle;
		if (!cycletime) {
			err = -EINVAL;
			goto chain_put;
		}
	}
	p->tcfg_cycletime = cycletime;

	if (tb[TCA_GATE_CYCLE_TIME_EXT])
		p->tcfg_cycletime_ext =
			nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);

	gate_setup_timer(gact, basetime, tk_offset, clockid,
			 ret == ACT_P_CREATED);
	p->tcfg_priority = prio;
	p->tcfg_flags = gflags;
	gate_get_start_time(gact, &start);

	gact->current_close_time = start;
	gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;

	gact->next_entry = list_first_entry(&p->entries,
					    struct tcfg_gate_entry, list);

	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);

	gate_start_timer(gact, start);

	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;

chain_put:
	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	/* action is not inserted in any list: it's safe to init hitimer
	 * without taking tcf_lock.
	 */
	if (ret == ACT_P_CREATED)
		gate_setup_timer(gact, gact->param.tcfg_basetime,
				 gact->tk_offset, gact->param.tcfg_clockid,
				 true);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_gate_cleanup(struct tc_action *a)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_gate_params *p;

	p = &gact->param;
	hrtimer_cancel(&gact->hitimer);
	release_entry_list(&p->entries);
}

static int dumping_entry(struct sk_buff *skb,
			 struct tcfg_gate_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(skb, TCA_GATE_ONE_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (entry->gate_state && nla_put_flag(skb, TCA_GATE_ENTRY_GATE))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INTERVAL, entry->interval))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_MAX_OCTETS, entry->maxoctets))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_IPV, entry->ipv))
		goto nla_put_failure;

	return nla_nest_end(skb, item);

nla_put_failure:
	nla_nest_cancel(skb, item);
	return -1;
}

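/* Dump the action configuration, including the full schedule entry
 * list, back to userspace.
 */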
static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_gate *gact = to_gate(a);
	struct tc_gate opt = {
		.index    = gact->tcf_index,
		.refcnt   = refcount_read(&gact->tcf_refcnt) - ref,
		.bindcnt  = atomic_read(&gact->tcf_bindcnt) - bind,
	};
	struct tcfg_gate_entry *entry;
	struct tcf_gate_params *p;
	struct nlattr *entry_list;
	struct tcf_t t;

	spin_lock_bh(&gact->tcf_lock);
	opt.action = gact->tcf_action;

	p = &gact->param;

	if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_BASE_TIME,
			      p->tcfg_basetime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME,
			      p->tcfg_cycletime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME_EXT,
			      p->tcfg_cycletime_ext, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_CLOCKID, p->tcfg_clockid))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_FLAGS, p->tcfg_flags))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_PRIORITY, p->tcfg_priority))
		goto nla_put_failure;

	entry_list = nla_nest_start_noflag(skb, TCA_GATE_ENTRY_LIST);
	if (!entry_list)
		goto nla_put_failure;

	list_for_each_entry(entry, &p->entries, list) {
		if (dumping_entry(skb, entry) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, entry_list);

	tcf_tm_dump(&t, &gact->tcf_tm);
	if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&gact->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&gact->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets,
				  u64 drops, u64 lastuse, bool hw)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_t *tm = &gact->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static size_t tcf_gate_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_gate));
}

static void tcf_gate_entry_destructor(void *priv)
{
	struct action_gate_entry *oe = priv;

	kfree(oe);
}

static int tcf_gate_get_entries(struct flow_action_entry *entry,
				const struct tc_action *act)
{
	entry->gate.entries = tcf_gate_get_list(act);

	if (!entry->gate.entries)
		return -EINVAL;

	entry->destructor = tcf_gate_entry_destructor;
	entry->destructor_priv = entry->gate.entries;

	return 0;
}

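/* Hardware offload hook: when binding to a filter, translate the gate
 * parameters and schedule into a FLOW_ACTION_GATE entry; otherwise just
 * identify the action for the offload layer.
 */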
static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data,
				      u32 *index_inc, bool bind,
				      struct netlink_ext_ack *extack)
{
	int err;

	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_GATE;
		entry->gate.prio = tcf_gate_prio(act);
		entry->gate.basetime = tcf_gate_basetime(act);
		entry->gate.cycletime = tcf_gate_cycletime(act);
		entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
		entry->gate.num_entries = tcf_gate_num_entries(act);
		err = tcf_gate_get_entries(entry, act);
		if (err)
			return err;
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_GATE;
	}

	return 0;
}

static struct tc_action_ops act_gate_ops = {
	.kind		=	"gate",
	.id		=	TCA_ID_GATE,
	.owner		=	THIS_MODULE,
	.act		=	tcf_gate_act,
	.dump		=	tcf_gate_dump,
	.init		=	tcf_gate_init,
	.cleanup	=	tcf_gate_cleanup,
	.stats_update	=	tcf_gate_stats_update,
	.get_fill_size	=	tcf_gate_get_fill_size,
	.offload_act_setup =	tcf_gate_offload_act_setup,
	.size		=	sizeof(struct tcf_gate),
};
MODULE_ALIAS_NET_ACT("gate");

static __net_init int gate_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);

	return tc_action_net_init(net, tn, &act_gate_ops);
}

static void __net_exit gate_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_gate_ops.net_id);
}

static struct pernet_operations gate_net_ops = {
	.init = gate_init_net,
	.exit_batch = gate_exit_net,
	.id   = &act_gate_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init gate_init_module(void)
{
	return tcf_register_action(&act_gate_ops, &gate_net_ops);
}

static void __exit gate_cleanup_module(void)
{
	tcf_unregister_action(&act_gate_ops, &gate_net_ops);
}

module_init(gate_init_module);
module_exit(gate_cleanup_module);
MODULE_DESCRIPTION("TC gate action");
MODULE_LICENSE("GPL v2");