// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);
/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static struct xarray tcf_exts_miss_cookies_xa;
struct tcf_exts_miss_cookie_node {
	const struct tcf_chain *chain;
	const struct tcf_proto *tp;
	const struct tcf_exts *exts;
	u32 chain_index;
	u32 tp_prio;
	u32 handle;
	u32 miss_cookie_base;
	struct rcu_head rcu;
};

/* Each tc action entry cookie will be comprised of 32bit miss_cookie_base +
 * action index in the exts tc actions array.
 */
union tcf_exts_miss_cookie {
	struct {
		u32 miss_cookie_base;
		u32 act_index;
	};
	u64 miss_cookie;
};
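
/* Note: the 64-bit cookie is only ever composed and decomposed through
 * this union (tcf_exts_miss_cookie_get() / tcf_exts_miss_cookie_lookup()),
 * so the placement of the two 32-bit halves within the u64 is
 * endianness-dependent and deliberately not relied upon anywhere else.
 */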

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	struct tcf_exts_miss_cookie_node *n;
	static u32 next;
	int err;

	if (WARN_ON(!handle || !tp->ops->get_exts))
		return -EINVAL;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->chain_index = tp->chain->index;
	n->chain = tp->chain;
	n->tp_prio = tp->prio;
	n->tp = tp;
	n->exts = exts;
	n->handle = handle;

	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
			      n, xa_limit_32b, &next, GFP_KERNEL);
	if (err)
		goto err_xa_alloc;

	exts->miss_cookie_node = n;
	return 0;

err_xa_alloc:
	kfree(n);
	return err;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
	struct tcf_exts_miss_cookie_node *n;

	if (!exts->miss_cookie_node)
		return;

	n = exts->miss_cookie_node;
	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
	kfree_rcu(n, rcu);
}

static struct tcf_exts_miss_cookie_node *
tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
{
	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };

	*act_index = mc.act_index;
	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
}
#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	return 0;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
}
#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */

static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
{
	union tcf_exts_miss_cookie mc = { .act_index = act_index, };

	if (!miss_cookie_base)
		return 0;

	mc.miss_cookie_base = miss_cookie_base;
	return mc.miss_cookie;
}

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

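/* A tcf_proto that has begun destruction is published in the block's
 * proto_destroy_ht, keyed by (chain index, prio, protocol). Concurrent
 * inserts of an identical triple check this table (see
 * tcf_proto_exists_destroying()) so the race can be detected and the
 * request replayed with -EAGAIN.
 */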
static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module(NET_CLS_ALIAS_PREFIX "%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

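/* A classifier module typically registers its ops from module_init(),
 * e.g. (sketch; "foo" is a made-up kind):
 *
 *	static struct tcf_proto_ops cls_foo_ops = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	return register_tcf_proto_ops(&cls_foo_ops);
 *
 * Registration fails with -EEXIST if the kind name is already taken.
 */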
int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

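/* Queue work on the tc filter workqueue once an RCU grace period has
 * elapsed. Returns false if @rwork was already queued; see
 * queue_rcu_work().
 */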
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
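
/* Example: with an empty chain the first auto-allocated prio is
 * TC_H_MAJ(0xC0000000) == 0xC0000000; if the current head has prio
 * 0xC0000000, the next one is TC_H_MAJ(0xC0000000 - 1) == 0xBFFF0000.
 */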

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	xa_destroy(&block->ports);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack);

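/* Look up a chain by index, or create it if @create is set. Takes a
 * reference on the returned chain; @by_act marks the reference as an
 * action reference. An RTM_NEWCHAIN notification is sent only once the
 * chain gets its first non-action reference, since action-only chains
 * are hidden from the user (see tcf_chain_held_by_acts_only()).
 */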
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, NULL);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	unsigned int refcnt, non_act_refcnt;
	bool free_block = false;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	non_act_refcnt = refcnt - chain->action_refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	if (non_act_refcnt == chain->explicitly_created && !by_act) {
		if (non_act_refcnt == 0)
			tc_chain_notify_delete(tmplt_ops, tmplt_priv,
					       chain->index, block, NULL, 0, 0);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

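/* Bind the block to a device for offload. A device that cannot offload
 * (tcf_block_offload_cmd() returned -EOPNOTSUPP) is tolerated and
 * accounted in nooffloaddevcnt, unless the block already has offloaded
 * filters, in which case the bind is refused.
 */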
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse to bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;
	xa_init(&block->ports);

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}
EXPORT_SYMBOL(tcf_block_lookup);

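/* Like tcf_block_lookup(), but also takes a reference. Performed under
 * the RCU read lock, with refcount_inc_not_zero() guarding against a
 * block whose last reference is concurrently being dropped.
 */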
static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Are we searching for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the cls API rules update path without the rtnl
		 * lock. The caller must release the block when it is finished
		 * using it. The 'if' branch of this conditional obtains its
		 * reference by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when the last chain is freed. However, if
		 * chain_list is empty, the block has to be manually
		 * deallocated. Once the block's reference counter reaches 0,
		 * it is no longer possible to increment it or add new chains
		 * to the block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

static bool tcf_block_tracks_dev(struct tcf_block *block,
				 struct tcf_block_ext_info *ei)
{
	return tcf_block_shared(block) &&
	       (ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS ||
		ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(q);
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	if (tcf_block_tracks_dev(block, ei)) {
		err = xa_insert(&block->ports, dev->ifindex, dev, GFP_KERNEL);
		if (err) {
			NL_SET_ERR_MSG(extack, "block dev insert failed");
			goto err_dev_insert;
		}
	}

	*p_block = block;
	return 0;

err_dev_insert:
err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

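/* Simple wrapper around tcf_block_get_ext() for qdiscs that keep their
 * filter list head in a tcf_proto pointer. Typical use from a qdisc's
 * ->init() looks like (sketch; field names are the qdisc's own):
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *
 * tcf_chain_head_change_dflt() then keeps q->filter_list pointing at
 * the current head of chain 0.
 */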
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	struct net_device *dev = qdisc_dev(q);

	if (!block)
		return;
	if (tcf_block_tracks_dev(block, ei))
		xa_erase(&block->ports, dev->ifindex);
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}

EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (chain->tmplt_ops && add)
			chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
							  cb_priv);
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
		if (chain->tmplt_ops && !add)
			chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
							  cb_priv);
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->driver_list);
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 struct tcf_exts_miss_cookie_node *n,
				 int act_index,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err = 0;

		if (n) {
			struct tcf_exts *exts;

			if (n->tp_prio != tp->prio)
				continue;

			/* We re-lookup the tp and chain based on index instead
			 * of having hard refs and locks to them, so do a sanity
			 * check if any of tp, chain, exts was replaced by the
			 * time we got here with a cookie from hardware.
			 */
			if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
				     !tp->ops->get_exts)) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_COOKIE_ERROR);
				return TC_ACT_SHOT;
			}

			exts = tp->ops->get_exts(tp, n->handle);
			if (unlikely(!exts || n->exts != exts)) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_COOKIE_ERROR);
				return TC_ACT_SHOT;
			}

			n = NULL;
			err = tcf_exts_exec_ex(skb, exts, act_index, res);
		} else {
			if (tp->protocol != protocol &&
			    tp->protocol != htons(ETH_P_ALL))
				continue;

			err = tc_classify(skb, tp, res);
		}
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	if (unlikely(n)) {
		tcf_set_drop_reason(skb,
				    SKB_DROP_REASON_TC_COOKIE_ERROR);
		return TC_ACT_SHOT;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		tcf_set_drop_reason(skb,
				    SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	struct tcf_exts_miss_cookie_node *n = NULL;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int act_index = 0;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && (ext->chain || ext->act_miss)) {
			struct tcf_chain *fchain;
			u32 chain;

			if (ext->act_miss) {
				n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
								&act_index);
				if (!n) {
					tcf_set_drop_reason(skb,
							    SKB_DROP_REASON_TC_COOKIE_ERROR);
					return TC_ACT_SHOT;
				}

				chain = n->chain_index;
			} else {
				chain = ext->chain;
			}

			fchain = tcf_chain_lookup_rcu(block, chain);
			if (!fchain) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_CHAIN_NOTFOUND);

				return TC_ACT_SHOT;
			}

			/* Consume, so cloned/redirect skbs won't inherit ext */
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
			     &last_executed_chain);

	if (tc_skb_ext_tc_enabled()) {
		/* If we missed on some chain */
		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
			struct tc_skb_cb *cb = tc_skb_cb(skb);

			ext = tc_skb_ext_alloc(skb);
			if (WARN_ON_ONCE(!ext)) {
				tcf_set_drop_reason(skb, SKB_DROP_REASON_NOMEM);
				return TC_ACT_SHOT;
			}
			ext->chain = last_executed_chain;
			ext->mru = cb->mru;
			ext->post_ct = cb->post_ct;
			ext->post_ct_snat = cb->post_ct_snat;
			ext->post_ct_dnat = cb->post_ct_dnat;
			ext->zone = cb->zone;
		}
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);

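/* Cursor into a chain's singly-linked tcf_proto list: 'pprev' points at
 * the link that leads to the tp found by tcf_chain_tp_find(), and
 * 'next' caches that tp's successor, so the tp can later be unlinked
 * without re-walking the list.
 */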
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for an existing tcf_proto with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}

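/* Fill a netlink message describing a single filter. With @terse_dump
 * only the minimal identification attributes plus the classifier's
 * ->terse_dump() output are included; otherwise the classifier's full
 * ->dump() is used.
 */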
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool terse_dump, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
		return 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  false, rtnl_held, extack) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool *last, bool rtnl_held,
			      struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
		return tp->ops->delete(tp, fh, last, rtnl_held, extack);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  false, rtnl_held, extack) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2139			     n->nlmsg_flags & NLM_F_ECHO);
2140	if (err < 0)
2141		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
2142
2143	return err;
2144}
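
/* Note on the ordering in tfilter_del_notify() above: the notification skb
 * is filled while the filter still exists and is only sent once ->delete()
 * has succeeded. Dumping first is necessary because the filter's attributes
 * are gone by the time ->delete() returns, yet the event must describe the
 * filter that was removed.
 */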

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL);
	     tp; tp = tcf_get_next_proto(chain, tp))
		tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
			       event, false, true, extack);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}

static bool is_qdisc_ingress(__u32 classid)
{
	return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
}
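
/* clsact/ingress parents encode the attachment point in the minor handle,
 * e.g. the clsact ingress hook is TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS),
 * so comparing TC_H_MIN() values above is sufficient to distinguish ingress
 * from egress attachment.
 */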

static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;
	u32 flags;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;
	q = NULL;
	chain = NULL;
	flags = 0;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take the rtnl mutex if rtnl_held was set to true on a previous
	 * iteration, the block is shared (no qdisc found), the qdisc does not
	 * support unlocked execution, the classifier type is not specified,
	 * or the classifier does not support unlocked execution.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}
	block->classid = parent;

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* The proto-tcf does not exist; create a new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(name, protocol, prio, chain,
					  rtnl_held, extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	if (!(n->nlmsg_flags & NLM_F_CREATE))
		flags |= TCA_ACT_FLAGS_REPLACE;
	if (!rtnl_held)
		flags |= TCA_ACT_FLAGS_NO_RTNL;
	if (is_qdisc_ingress(parent))
		flags |= TCA_ACT_FLAGS_AT_INGRESS;
	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      flags, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held, extack);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
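
/* For reference, both of the following iproute2 commands (shown purely as
 * an illustration, assuming a clsact qdisc on eth0) end up in
 * tc_new_tfilter() above:
 *
 *	tc filter add dev eth0 ingress prio 10 protocol ip \
 *		flower dst_ip 192.0.2.1 action drop
 *	tc filter replace dev eth0 ingress prio 10 protocol ip \
 *		flower dst_ip 192.0.2.1 action pass
 *
 * "add" sets NLM_F_CREATE | NLM_F_EXCL, so hitting an existing filter
 * yields -EEXIST, while "replace" sets NLM_F_CREATE without NLM_F_EXCL and
 * updates the filter in place. Omitting "prio" with NLM_F_CREATE set takes
 * the priority auto-allocation path at the top of the function.
 */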

static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take the rtnl mutex if flushing the whole chain, the block is shared
	 * (no qdisc found), the qdisc does not support unlocked execution, the
	 * classifier type is not specified, or the classifier does not support
	 * unlocked execution.
	 */
	if (!prio ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, extack);
		tcf_chain_flush(chain, rtnl_held);
		err = 0;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout_locked;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout_locked;
	} else if (t->tcm_handle == 0) {
		tcf_proto_signal_destroying(chain, tp);
		tcf_chain_tp_remove(chain, &chain_info, tp);
		mutex_unlock(&chain->filter_chain_lock);

		tcf_proto_put(tp, rtnl_held, NULL);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_DELTFILTER, false, rtnl_held, extack);
		err = 0;
		goto errout;
	}
	mutex_unlock(&chain->filter_chain_lock);

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh,
					 &last, rtnl_held, extack);

		if (err)
			goto errout;
		if (last)
			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
	}

errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
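
/* Illustration of the two delete modes handled above (again assuming a
 * clsact qdisc on eth0):
 *
 *	tc filter del dev eth0 ingress			# prio == 0: flush
 *	tc filter del dev eth0 ingress prio 10 \
 *		protocol ip flower			# delete one tp
 *
 * The first form flushes the whole chain and treats a non-existent chain
 * as success; the second requires the priority/protocol pair to resolve to
 * an existing tp and, if a handle is also given, removes a single filter
 * instead of the whole tp.
 */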

static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take the rtnl mutex if the block is shared (no qdisc found), the
	 * qdisc does not support unlocked execution, the classifier type is
	 * not specified, or the classifier does not support unlocked
	 * execution.
	 */
	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	mutex_unlock(&chain->filter_chain_lock);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

	tfilter_put(tp, fh);
errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;
}

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
	bool terse_dump;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER, a->terse_dump, true, NULL);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index, bool terse)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_proto *tp, *tp_prev;
	struct tcf_dump_args arg;

	for (tp = __tcf_get_next_proto(chain, NULL);
	     tp;
	     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL),
		     (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER, false, true, NULL) <= 0)
				goto errout;
			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		arg.w.cookie = cb->args[2];
		arg.terse_dump = terse;
		tp->ops->walk(tp, &arg.w, true);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			goto errout;
	}
	return true;

errout:
	tcf_proto_put(tp, true, NULL);
	return false;
}
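
/* Resume state used by tcf_chain_dump() is kept in the netlink callback:
 * cb->args[0] holds the flat filter index where the previous dump stopped,
 * cb->args[1] holds the position within the current tp (0 meaning the tp
 * header has not been emitted yet), and cb->args[2] carries an opaque
 * walker cookie for classifiers that support cookie-based resumption.
 */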

static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
	[TCA_CHAIN]      = { .type = NLA_U32 },
	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
};

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcf_chain *chain, *chain_prev;
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	bool terse_dump = false;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     tcf_tfilter_dump_policy, cb->extack);
	if (err)
		return err;

	if (tca[TCA_DUMP_FLAGS]) {
		struct nla_bitfield32 flags =
			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);

		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with a block index, q is NULL and the parent
		 * value will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, the compiler cannot
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent)
			q = rtnl_dereference(dev->qdisc);
		else
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		parent = block->classid;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index, terse_dump)) {
			tcf_chain_put(chain);
			err = -EMSGSIZE;
			break;
		}
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we made no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
			      void *tmplt_priv, u32 chain_index,
			      struct net *net, struct sk_buff *skb,
			      struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event,
			      struct netlink_ext_ack *extack)
{
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_proto_ops *ops;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	void *priv;

	ops = tmplt_ops;
	priv = tmplt_priv;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_handle = 0;
	if (block->q) {
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}

	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
		goto nla_put_failure;

	if (ops) {
		if (nla_put_string(skb, TCA_KIND, ops->kind))
			goto nla_put_failure;
		if (ops->tmplt_dump(skb, net, priv) < 0)
			goto nla_put_failure;
	}

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
		goto out_nlmsg_trim;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;
	int err = 0;

	if (!unicast && !rtnl_notify_needed(net, flags, RTNLGRP_TC))
		return 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
			       chain->index, net, skb, block, portid,
			       seq, flags, event, extack) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     flags & NLM_F_ECHO);

	return err;
}

static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct net *net = block->net;
	struct sk_buff *skb;

	if (!rtnl_notify_needed(net, flags, RTNLGRP_TC))
		return 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
			       block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}

static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;
	char name[IFNAMSIZ];
	void *tmplt_priv;

	/* If kind is not set, the user did not specify a template. */
	if (!tca[TCA_KIND])
		return 0;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
		return -EINVAL;
	}

	ops = tcf_proto_lookup_ops(name, true, extack);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
	    !ops->tmplt_reoffload) {
		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
		module_put(ops->owner);
		return -EOPNOTSUPP;
	}

	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
	if (IS_ERR(tmplt_priv)) {
		module_put(ops->owner);
		return PTR_ERR(tmplt_priv);
	}
	chain->tmplt_ops = ops;
	chain->tmplt_priv = tmplt_priv;
	return 0;
}
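
/* A chain template created above constrains every filter subsequently added
 * to the chain. An illustrative iproute2 sequence (flower is one of the few
 * classifiers implementing the tmplt_* ops):
 *
 *	tc chain add dev eth0 ingress chain 1 \
 *		protocol ip flower dst_ip 192.0.2.0/24
 *	tc filter add dev eth0 ingress chain 1 prio 1 \
 *		protocol ip flower dst_ip 192.0.2.7 action drop
 *
 * A filter whose keys do not fit the template's shape is rejected by the
 * classifier's change() callback.
 */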

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv)
{
	/* If template ops are not set, there is no work to do. */
	if (!tmplt_ops)
		return;

	tmplt_ops->tmplt_destroy(tmplt_priv);
	module_put(tmplt_ops->owner);
}

/* Add/delete/get a chain */

static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q;
	struct tcf_chain *chain;
	struct tcf_block *block;
	unsigned long cl;
	int err;

replay:
	q = NULL;
	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	parent = t->tcm_parent;
	cl = 0;

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout_block;
	}

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
		if (chain) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
			} else {
				NL_SET_ERR_MSG(extack, "Filter chain already exists");
				err = -EEXIST;
				goto errout_block_locked;
			}
		} else {
			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
				err = -ENOENT;
				goto errout_block_locked;
			}
			chain = tcf_chain_create(block, chain_index);
			if (!chain) {
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
				err = -ENOMEM;
				goto errout_block_locked;
			}
		}
	} else {
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
			err = -EINVAL;
			goto errout_block_locked;
		}
		tcf_chain_hold(chain);
	}

	if (n->nlmsg_type == RTM_NEWCHAIN) {
		/* Modifying chain requires holding parent block lock. In case
		 * the chain was successfully added, take a reference to the
		 * chain. This ensures that an empty chain does not disappear at
		 * the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
	}
	mutex_unlock(&block->lock);

	switch (n->nlmsg_type) {
	case RTM_NEWCHAIN:
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		if (err) {
			tcf_chain_put_explicitly_created(chain);
			goto errout;
		}

		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, extack);
		break;
	case RTM_DELCHAIN:
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, extack);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain, true);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		break;
	case RTM_GETCHAIN:
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_flags, n->nlmsg_type, true, extack);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		break;
	default:
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack, "Unsupported message type");
		goto errout;
	}

errout:
	tcf_chain_put(chain);
errout_block:
	tcf_block_release(q, block, true);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;

errout_block_locked:
	mutex_unlock(&block->lock);
	goto errout_block;
}

/* called with RTNL */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_chain *chain;
	long index_start;
	long index;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		if (!tcm->tcm_parent)
			q = rtnl_dereference(dev->qdisc);
		else
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));

		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	mutex_lock(&block->lock);
	list_for_each_entry(chain, &block->chain_list, list) {
		if ((tca[TCA_CHAIN] &&
		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
			continue;
		if (index < index_start) {
			index++;
			continue;
		}
		if (tcf_chain_held_by_acts_only(chain))
			continue;
		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
					 chain->index, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 RTM_NEWCHAIN, NULL);
		if (err <= 0)
			break;
		index++;
	}
	mutex_unlock(&block->lock);

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we made no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
		     int police, struct tcf_proto *tp, u32 handle,
		     bool use_action_miss)
{
	int err = 0;

#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->miss_cookie_node = NULL;
	/* Note: we do not yet own a reference on net.
	 * This reference might be taken later from tcf_exts_get_net().
	 */
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif

	exts->action = action;
	exts->police = police;

	if (!use_action_miss)
		return 0;

	err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
	if (err)
		goto err_miss_alloc;

	return 0;

err_miss_alloc:
	tcf_exts_destroy(exts);
#ifdef CONFIG_NET_CLS_ACT
	exts->actions = NULL;
#endif
	return err;
}
EXPORT_SYMBOL(tcf_exts_init_ex);
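
/* Sketch of the tcf_exts lifecycle as driven from a classifier's change()
 * callback (error handling trimmed; TCA_FOO_* are placeholder attribute
 * names, not taken from any real classifier):
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
 *	...
 *	tcf_exts_destroy(&f->exts);	// on filter teardown
 *
 * tcf_exts_init() is the use_action_miss == false wrapper around
 * tcf_exts_init_ex() above.
 */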

void tcf_exts_destroy(struct tcf_exts *exts)
{
	tcf_exts_miss_cookie_base_destroy(exts);

#ifdef CONFIG_NET_CLS_ACT
	if (exts->actions) {
		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
		kfree(exts->actions);
	}
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
			 struct nlattr *rate_tlv, struct tcf_exts *exts,
			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		int init_res[TCA_ACT_MAX_PRIO] = {};
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			struct tc_action_ops *a_o;

			flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
			a_o = tc_action_load_ops(tb[exts->police], flags,
						 extack);
			if (IS_ERR(a_o))
				return PTR_ERR(a_o);
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, a_o, init_res, flags,
						extack);
			module_put(a_o->owner);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
			tcf_idr_insert_many(exts->actions, init_res);
		} else if (exts->action && tb[exts->action]) {
			int err;

			flags |= TCA_ACT_FLAGS_BIND;
			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, exts->actions, init_res,
					      &attr_size, flags, fl_flags,
					      extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate_ex);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts,
		      u32 flags, struct netlink_ext_ack *extack)
{
	return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
				    flags, 0, extack);
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * Again for backward-compatible mode - we want to work with
		 * both old and new modes of entering tc data even if iproute2
		 * is newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start_noflag(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
			    < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start_noflag(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (!exts->action || !tcf_exts_has_actions(exts))
		return 0;

	nest = nla_nest_start_noflag(skb, exts->action);
	if (!nest)
		goto nla_put_failure;

	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_terse_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	atomic_inc(&block->offloadcnt);
}

static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	atomic_dec(&block->offloadcnt);
}

static void tc_cls_offload_cnt_update(struct tcf_block *block,
				      struct tcf_proto *tp, u32 *cnt,
				      u32 *flags, u32 diff, bool add)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		*cnt += diff;
	} else {
		*cnt -= diff;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
	spin_unlock(&tp->lock);
}

static void
tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
			 u32 *cnt, u32 *flags)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	tcf_block_offload_dec(block, flags);
	*cnt = 0;
	spin_unlock(&tp->lock);
}
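
/* Invariant maintained by the helpers above: *cnt is the number of block
 * callbacks that currently have the filter in hardware, while
 * TCA_CLS_FLAGS_IN_HW in *flags and block->offloadcnt track the cnt != 0
 * condition. A filter accepted by three callbacks therefore contributes
 * exactly one to block->offloadcnt until its count drops back to zero.
 */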

static int
__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		   void *type_data, bool err_stop)
{
	struct flow_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we
	 * must obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);
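
/* The retry dance above mirrors the bind-time lock order rtnl -> cb_lock:
 * when an unlocked caller observes lockeddevcnt != 0 after taking cb_lock,
 * it must back off, take rtnl first and then re-take cb_lock, since taking
 * rtnl while already holding cb_lock could deadlock against a concurrent
 * block bind. The same pattern repeats in tc_setup_cb_add(),
 * tc_setup_cb_replace() and tc_setup_cb_destroy() below.
 */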

/* Non-destructive filter add. If a filter that wasn't already in hardware is
 * successfully offloaded, increment the block offloads counter. On failure,
 * a previously offloaded filter is considered to be intact and the offloads
 * counter is not decremented.
 */

int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we
	 * must obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
					  ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_add);

/* Destructive filter replace. If a filter that wasn't already in hardware is
 * successfully offloaded, increment the block offload counter. On failure,
 * a previously offloaded filter is considered to be destroyed and the offload
 * counter is decremented.
 */

int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we
	 * must obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
					  new_flags, ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_replace);

/* Destroy the filter and decrement the block offload counter, if the filter
 * was previously offloaded.
 */

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we
	 * must obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_destroy);

int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (err) {
		if (add && tc_skip_sw(*flags))
			return err;
	} else {
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
					  add);
	}

	return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);

static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
				   const struct tc_action *act)
{
	struct tc_cookie *user_cookie;
	int err = 0;

	rcu_read_lock();
	user_cookie = rcu_dereference(act->user_cookie);
	if (user_cookie) {
		entry->user_cookie = flow_action_cookie_create(user_cookie->data,
							       user_cookie->len,
							       GFP_ATOMIC);
		if (!entry->user_cookie)
			err = -ENOMEM;
	}
	rcu_read_unlock();
	return err;
}

static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
{
	flow_action_cookie_destroy(entry->user_cookie);
}

void tc_cleanup_offload_action(struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action) {
		tcf_act_put_user_cookie(entry);
		if (entry->destructor)
			entry->destructor(entry->destructor_priv);
	}
}
EXPORT_SYMBOL(tc_cleanup_offload_action);

static int tc_setup_offload_act(struct tc_action *act,
				struct flow_action_entry *entry,
				u32 *index_inc,
				struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	if (act->ops->offload_act_setup) {
		return act->ops->offload_act_setup(act, entry, index_inc, true,
						   extack);
	} else {
		NL_SET_ERR_MSG(extack, "Action does not support offload");
		return -EOPNOTSUPP;
	}
#else
	return 0;
#endif
}

int tc_setup_action(struct flow_action *flow_action,
		    struct tc_action *actions[],
		    u32 miss_cookie_base,
		    struct netlink_ext_ack *extack)
{
	int i, j, k, index, err = 0;
	struct tc_action *act;

	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);

	if (!actions)
		return 0;

	j = 0;
	tcf_act_for_each_action(i, act, actions) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		spin_lock_bh(&act->tcfa_lock);
		err = tcf_act_get_user_cookie(entry, act);
		if (err)
			goto err_out_locked;

		index = 0;
		err = tc_setup_offload_act(act, entry, &index, extack);
		if (err)
			goto err_out_locked;

		for (k = 0; k < index; k++) {
			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
			entry[k].hw_index = act->tcfa_index;
			entry[k].cookie = (unsigned long)act;
			entry[k].miss_cookie =
				tcf_exts_miss_cookie_get(miss_cookie_base, i);
		}

		j += index;

		spin_unlock_bh(&act->tcfa_lock);
	}

err_out:
	if (err)
		tc_cleanup_offload_action(flow_action);

	return err;
err_out_locked:
	spin_unlock_bh(&act->tcfa_lock);
	goto err_out;
}
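
/* In tc_setup_action() above a single tc action may expand into several
 * flow_action entries: tc_setup_offload_act() reports the expansion width
 * through its index argument, so e.g. a pedit action with three keys fills
 * entries[j], entries[j + 1] and entries[j + 2] before j advances.
 * tcf_exts_num_actions() below sizes the flow_action array to match.
 */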

int tc_setup_offload_action(struct flow_action *flow_action,
			    const struct tcf_exts *exts,
			    struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	u32 miss_cookie_base;

	if (!exts)
		return 0;

	miss_cookie_base = exts->miss_cookie_node ?
			   exts->miss_cookie_node->miss_cookie_base : 0;
	return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
			       extack);
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tc_setup_offload_action);

unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);

#ifdef CONFIG_NET_CLS_ACT
static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
					u32 *p_block_index,
					struct netlink_ext_ack *extack)
{
	*p_block_index = nla_get_u32(block_index_attr);
	if (!*p_block_index) {
		NL_SET_ERR_MSG(extack, "Block number may not be zero");
		return -EINVAL;
	}

	return 0;
}

int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	qe->info.binder_type = binder_type;
	qe->info.chain_head_change = tcf_chain_head_change_dflt;
	qe->info.chain_head_change_priv = &qe->filter_chain;
	qe->info.block_index = block_index;

	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
}
EXPORT_SYMBOL(tcf_qevent_init);

void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
	if (qe->info.block_index)
		tcf_block_put_ext(qe->block, sch, &qe->info);
}
EXPORT_SYMBOL(tcf_qevent_destroy);

int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	/* Reject a newly configured block or a change of block. */
	if (block_index != qe->info.block_index) {
		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_qevent_validate_change);

struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret)
{
	struct tcf_result cl_res;
	struct tcf_proto *fl;

	if (!qe->info.block_index)
		return skb;

	fl = rcu_dereference_bh(qe->filter_chain);

	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
	case TC_ACT_SHOT:
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_BYPASS;
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	case TC_ACT_REDIRECT:
		skb_do_redirect(skb);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	}

	return skb;
}
EXPORT_SYMBOL(tcf_qevent_handle);
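
/* Typical qevent usage from a qdisc, sketched rather than quoted verbatim
 * (RED, for instance, wires its early_drop qevent through this API):
 *
 *	tcf_qevent_init(&q->qe_early_drop, sch,
 *			FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			tb[TCA_RED_EARLY_DROP_BLOCK], extack);
 *	...
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return ret;
 */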

int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	if (!qe->info.block_index)
		return 0;
	return nla_put_u32(skb, attr_name, qe->info.block_index);
}
EXPORT_SYMBOL(tcf_qevent_dump);
#endif

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);
