/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * This code is GPL.
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_queue.h>
#include <net/sock.h>

#include "nf_internals.h"

const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

#ifdef CONFIG_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

static DEFINE_MUTEX(nf_hook_mutex);

/* max hooks per family/hooknum */
#define MAX_HOOK_COUNT		1024

#define nf_entry_dereference(e) \
	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))

static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
{
	struct nf_hook_entries *e;
	size_t alloc = sizeof(*e) +
		       sizeof(struct nf_hook_entry) * num +
		       sizeof(struct nf_hook_ops *) * num +
		       sizeof(struct nf_hook_entries_rcu_head);

	if (num == 0)
		return NULL;

	e = kvzalloc(alloc, GFP_KERNEL_ACCOUNT);
	if (e)
		e->num_hook_entries = num;
	return e;
}
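
/* The blob returned above is a single allocation laid out as:
 *
 *	struct nf_hook_entries           header (num_hook_entries)
 *	struct nf_hook_entry[num]        hook fn + priv, the hot-path array
 *	struct nf_hook_ops *[num]        registration info, control path only
 *	struct nf_hook_entries_rcu_head  stashed rcu_head
 *
 * Keeping the ops pointers and the rcu_head behind the entry array keeps
 * the hot path dense; nf_hook_entries_free() below recovers the trailing
 * rcu_head through the ops array instead of embedding one in the header.
 */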

static void __nf_hook_entries_free(struct rcu_head *h)
{
	struct nf_hook_entries_rcu_head *head;

	head = container_of(h, struct nf_hook_entries_rcu_head, head);
	kvfree(head->allocation);
}

static void nf_hook_entries_free(struct nf_hook_entries *e)
{
	struct nf_hook_entries_rcu_head *head;
	struct nf_hook_ops **ops;
	unsigned int num;

	if (!e)
		return;

	num = e->num_hook_entries;
	ops = nf_hook_entries_get_hook_ops(e);
	head = (void *)&ops[num];
	head->allocation = e;
	call_rcu(&head->head, __nf_hook_entries_free);
}

static unsigned int accept_all(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT; /* ACCEPT makes nf_hook_slow call next hook */
}

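/* Installed in place of a removed hook by nf_remove_net_hook():
 * unregistration must not fail, so the slot degrades to this no-op
 * until __nf_hook_entries_try_shrink() compacts the blob.
 */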
static const struct nf_hook_ops dummy_ops = {
	.hook = accept_all,
	.priority = INT_MIN,
};

static struct nf_hook_entries *
nf_hook_entries_grow(const struct nf_hook_entries *old,
		     const struct nf_hook_ops *reg)
{
	unsigned int i, alloc_entries, nhooks, old_entries;
	struct nf_hook_ops **orig_ops = NULL;
	struct nf_hook_ops **new_ops;
	struct nf_hook_entries *new;
	bool inserted = false;

	alloc_entries = 1;
	old_entries = old ? old->num_hook_entries : 0;

	if (old) {
		orig_ops = nf_hook_entries_get_hook_ops(old);

		for (i = 0; i < old_entries; i++) {
			if (orig_ops[i] != &dummy_ops)
				alloc_entries++;

			/* Restrict BPF hook types to a unique priority: the
			 * priority must not be shared at attach time.
			 *
			 * This is mainly to avoid ordering issues between
			 * two different bpf programs; it doesn't prevent a
			 * normal hook from sharing a priority with a bpf one
			 * (we don't want to prevent defrag, conntrack,
			 * iptables etc. from attaching).
			 */
			if (reg->priority == orig_ops[i]->priority &&
			    reg->hook_ops_type == NF_HOOK_OP_BPF)
				return ERR_PTR(-EBUSY);
		}
	}

	if (alloc_entries > MAX_HOOK_COUNT)
		return ERR_PTR(-E2BIG);

	new = allocate_hook_entries_size(alloc_entries);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new_ops = nf_hook_entries_get_hook_ops(new);

	i = 0;
	nhooks = 0;
	while (i < old_entries) {
		if (orig_ops[i] == &dummy_ops) {
			++i;
			continue;
		}

		if (inserted || reg->priority > orig_ops[i]->priority) {
			new_ops[nhooks] = (void *)orig_ops[i];
			new->hooks[nhooks] = old->hooks[i];
			i++;
		} else {
			new_ops[nhooks] = (void *)reg;
			new->hooks[nhooks].hook = reg->hook;
			new->hooks[nhooks].priv = reg->priv;
			inserted = true;
		}
		nhooks++;
	}

	if (!inserted) {
		new_ops[nhooks] = (void *)reg;
		new->hooks[nhooks].hook = reg->hook;
		new->hooks[nhooks].priv = reg->priv;
	}

	return new;
}
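
/* A worked example of the ordering maintained above: growing a blob whose
 * ops have priorities [-200, 0, 300] with a new hook of priority 0 yields
 * [-200, 0 (new), 0 (old), 300]; entries stay sorted by ascending priority
 * and a tie places the new hook first (ties are refused with -EBUSY only
 * for bpf hooks). hooks_validate() below checks this invariant.
 */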

static void hooks_validate(const struct nf_hook_entries *hooks)
{
#ifdef CONFIG_DEBUG_MISC
	struct nf_hook_ops **orig_ops;
	int prio = INT_MIN;
	size_t i = 0;

	orig_ops = nf_hook_entries_get_hook_ops(hooks);

	for (i = 0; i < hooks->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;

		WARN_ON(orig_ops[i]->priority < prio);

		if (orig_ops[i]->priority > prio)
			prio = orig_ops[i]->priority;
	}
#endif
}

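/* Raw variant for callers that serialize updates themselves rather than
 * through nf_hook_mutex, hence the rcu_dereference_raw() below.
 */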
int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *new_hooks;
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

	hooks_validate(new_hooks);

	rcu_assign_pointer(*pp, new_hooks);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_hook_entries_insert_raw);

/*
 * __nf_hook_entries_try_shrink - try to shrink hook array
 *
 * @old -- current hook blob at @pp
 * @pp -- location of hook blob
 *
 * Hook unregistration must always succeed, so to-be-removed hooks
 * are replaced by a dummy one that will just move to next hook.
 *
 * This counts the current dummy hooks, attempts to allocate new blob,
 * copies the live hooks, then replaces and discards old one.
 *
 * return values:
 *
 * Returns address to free, or NULL.
 */
static void *__nf_hook_entries_try_shrink(struct nf_hook_entries *old,
					  struct nf_hook_entries __rcu **pp)
{
	unsigned int i, j, skip = 0, hook_entries;
	struct nf_hook_entries *new = NULL;
	struct nf_hook_ops **orig_ops;
	struct nf_hook_ops **new_ops;

	if (WARN_ON_ONCE(!old))
		return NULL;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			skip++;
	}

	/* if skip == hook_entries all hooks have been removed */
	hook_entries = old->num_hook_entries;
	if (skip == hook_entries)
		goto out_assign;

	if (skip == 0)
		return NULL;

	hook_entries -= skip;
	new = allocate_hook_entries_size(hook_entries);
	if (!new)
		return NULL;

	new_ops = nf_hook_entries_get_hook_ops(new);
	for (i = 0, j = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;
		new->hooks[j] = old->hooks[i];
		new_ops[j] = (void *)orig_ops[i];
		j++;
	}
	hooks_validate(new);
out_assign:
	rcu_assign_pointer(*pp, new);
	return old;
}

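/* Map a family/hooknum pair to the location of its hook blob pointer:
 * the per-netns arrays in net->nf for IPv4/IPv6/ARP/bridge, or a
 * per-device pointer for the netdev family and for inet ingress, whose
 * hooks also hang off the net_device.
 */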
static struct nf_hook_entries __rcu **
nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
		   struct net_device *dev)
{
	switch (pf) {
	case NFPROTO_NETDEV:
		break;
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	case NFPROTO_ARP:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum))
			return NULL;
		return net->nf.hooks_arp + hooknum;
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_bridge) <= hooknum))
			return NULL;
		return net->nf.hooks_bridge + hooknum;
#endif
#ifdef CONFIG_NETFILTER_INGRESS
	case NFPROTO_INET:
		if (WARN_ON_ONCE(hooknum != NF_INET_INGRESS))
			return NULL;
		if (!dev || dev_net(dev) != net) {
			WARN_ON_ONCE(1);
			return NULL;
		}
		return &dev->nf_hooks_ingress;
#endif
	case NFPROTO_IPV4:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv4 + hooknum;
	case NFPROTO_IPV6:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv6 + hooknum;
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}

#ifdef CONFIG_NETFILTER_INGRESS
	if (hooknum == NF_NETDEV_INGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_ingress;
	}
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	if (hooknum == NF_NETDEV_EGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_egress;
	}
#endif
	WARN_ON_ONCE(1);
	return NULL;
}

static int nf_ingress_check(struct net *net, const struct nf_hook_ops *reg,
			    int hooknum)
{
#ifndef CONFIG_NETFILTER_INGRESS
	if (reg->hooknum == hooknum)
		return -EOPNOTSUPP;
#endif
	if (reg->hooknum != hooknum ||
	    !reg->dev || dev_net(reg->dev) != net)
		return -EINVAL;

	return 0;
}

static inline bool __maybe_unused nf_ingress_hook(const struct nf_hook_ops *reg,
						  int pf)
{
	if ((pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) ||
	    (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS))
		return true;

	return false;
}

static inline bool __maybe_unused nf_egress_hook(const struct nf_hook_ops *reg,
						 int pf)
{
	return pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_EGRESS;
}

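/* NFPROTO_INET/NF_INET_INGRESS hooks run from the netdev ingress path,
 * so both helpers below account them against the
 * NFPROTO_NETDEV/NF_NETDEV_INGRESS static key instead of an inet one.
 */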
static void nf_static_key_inc(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_inc(&nf_hooks_needed[pf][hooknum]);
#endif
}

static void nf_static_key_dec(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_dec(&nf_hooks_needed[pf][hooknum]);
#endif
}

static int __nf_register_net_hook(struct net *net, int pf,
				  const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p, *new_hooks;
	struct nf_hook_entries __rcu **pp;
	int err;

	switch (pf) {
	case NFPROTO_NETDEV:
#ifndef CONFIG_NETFILTER_INGRESS
		if (reg->hooknum == NF_NETDEV_INGRESS)
			return -EOPNOTSUPP;
#endif
#ifndef CONFIG_NETFILTER_EGRESS
		if (reg->hooknum == NF_NETDEV_EGRESS)
			return -EOPNOTSUPP;
#endif
		if ((reg->hooknum != NF_NETDEV_INGRESS &&
		     reg->hooknum != NF_NETDEV_EGRESS) ||
		    !reg->dev || dev_net(reg->dev) != net)
			return -EINVAL;
		break;
	case NFPROTO_INET:
		if (reg->hooknum != NF_INET_INGRESS)
			break;

		err = nf_ingress_check(net, reg, NF_INET_INGRESS);
		if (err < 0)
			return err;
		break;
	}

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return -EINVAL;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);

	if (!IS_ERR(new_hooks)) {
		hooks_validate(new_hooks);
		rcu_assign_pointer(*pp, new_hooks);
	}

	mutex_unlock(&nf_hook_mutex);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_ingress_hook(reg, pf))
		net_inc_ingress_queue();
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	if (nf_egress_hook(reg, pf))
		net_inc_egress_queue();
#endif
	nf_static_key_inc(reg, pf);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}

/*
 * nf_remove_net_hook - remove a hook from blob
 *
 * @old: current hook blob
 * @unreg: hook to unregister
 *
 * This cannot fail, hook unregistration must always succeed.
 * Therefore replace the to-be-removed hook with a dummy hook.
 */
static bool nf_remove_net_hook(struct nf_hook_entries *old,
			       const struct nf_hook_ops *unreg)
{
	struct nf_hook_ops **orig_ops;
	unsigned int i;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] != unreg)
			continue;
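		/* Packets may be walking this blob under RCU while we hold
		 * nf_hook_mutex, so hand over to the dummy hook with atomic
		 * stores: a traverser sees either the old hook or accept_all,
		 * never a torn pointer.
		 */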
		WRITE_ONCE(old->hooks[i].hook, accept_all);
		WRITE_ONCE(orig_ops[i], (void *)&dummy_ops);
		return true;
	}

	return false;
}

static void __nf_unregister_net_hook(struct net *net, int pf,
				     const struct nf_hook_ops *reg)
{
	struct nf_hook_entries __rcu **pp;
	struct nf_hook_entries *p;

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	if (WARN_ON_ONCE(!p)) {
		mutex_unlock(&nf_hook_mutex);
		return;
	}

	if (nf_remove_net_hook(p, reg)) {
#ifdef CONFIG_NETFILTER_INGRESS
		if (nf_ingress_hook(reg, pf))
			net_dec_ingress_queue();
#endif
#ifdef CONFIG_NETFILTER_EGRESS
		if (nf_egress_hook(reg, pf))
			net_dec_egress_queue();
#endif
		nf_static_key_dec(reg, pf);
	} else {
		WARN_ONCE(1, "hook not found, pf %d num %d", pf, reg->hooknum);
	}

	p = __nf_hook_entries_try_shrink(p, pp);
	mutex_unlock(&nf_hook_mutex);
	if (!p)
		return;

	nf_queue_nf_hook_drop(net);
	nf_hook_entries_free(p);
}

void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			__nf_unregister_net_hook(net, NFPROTO_INET, reg);
		} else {
			__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
			__nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
		}
	} else {
		__nf_unregister_net_hook(net, reg->pf, reg);
	}
}
EXPORT_SYMBOL(nf_unregister_net_hook);

void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	if (nf_remove_net_hook(p, reg)) {
		p = __nf_hook_entries_try_shrink(p, pp);
		nf_hook_entries_free(p);
	}
}
EXPORT_SYMBOL_GPL(nf_hook_entries_delete_raw);

int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	int err;

	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			err = __nf_register_net_hook(net, NFPROTO_INET, reg);
			if (err < 0)
				return err;
		} else {
			err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
			if (err < 0)
				return err;

			err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
			if (err < 0) {
				__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
				return err;
			}
		}
	} else {
		err = __nf_register_net_hook(net, reg->pf, reg);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);
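
/* A minimal usage sketch of the API above (illustrative only; the hook
 * function and ops below are hypothetical, not in-tree users):
 *
 *	static unsigned int example_hookfn(void *priv, struct sk_buff *skb,
 *					   const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;
 *	}
 *
 *	static const struct nf_hook_ops example_ops = {
 *		.hook		= example_hookfn,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_LOCAL_IN,
 *		.priority	= 0,
 *	};
 *
 *	err = nf_register_net_hook(net, &example_ops);
 *	...
 *	nf_unregister_net_hook(net, &example_ops);
 *
 * For NFPROTO_INET (other than ingress), nf_register_net_hook() registers
 * the same ops with both the IPv4 and IPv6 blobs, as seen above.
 */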

int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_net_hook(net, &reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_net_hooks(net, reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int hookcount)
{
	unsigned int i;

	for (i = 0; i < hookcount; i++)
		nf_unregister_net_hook(net, &reg[i]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);

/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM (or the errno embedded in the verdict) for NF_DROP,
 * 0 otherwise.  Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 const struct nf_hook_entries *e, unsigned int s)
{
	unsigned int verdict;
	int ret;

	for (; s < e->num_hook_entries; s++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			break;
		case NF_DROP:
			kfree_skb_reason(skb,
					 SKB_DROP_REASON_NETFILTER_DROP);
			ret = NF_DROP_GETERR(verdict);
			if (ret == 0)
				ret = -EPERM;
			return ret;
		case NF_QUEUE:
			ret = nf_queue(skb, state, s, verdict);
			if (ret == 1)
				continue;
			return ret;
		case NF_STOLEN:
			return NF_DROP_GETERR(verdict);
		default:
			WARN_ON_ONCE(1);
			return 0;
		}
	}

	return 1;
}
EXPORT_SYMBOL(nf_hook_slow);
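
/* Caller-side sketch of the contract above (simplified; this mirrors how
 * NF_HOOK-style paths consume the return value, under rcu_read_lock()):
 *
 *	ret = nf_hook_slow(skb, &state, e, 0);
 *	if (ret == 1)
 *		ret = okfn(net, sk, skb);	// every hook accepted
 *	// ret <= 0: skb was dropped, queued or stolen; don't touch it
 */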

void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
		       const struct nf_hook_entries *e)
{
	struct sk_buff *skb, *next;
	struct list_head sublist;
	int ret;

	INIT_LIST_HEAD(&sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		ret = nf_hook_slow(skb, state, e, 0);
		if (ret == 1)
			list_add_tail(&skb->list, &sublist);
	}
	/* Put passed packets back on main list */
	list_splice(&sublist, head);
}
EXPORT_SYMBOL(nf_hook_slow_list);

/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
const struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);

const struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_hook);

const struct nf_defrag_hook __rcu *nf_defrag_v4_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_defrag_v4_hook);

const struct nf_defrag_hook __rcu *nf_defrag_v6_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_defrag_v6_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
u8 nf_ctnetlink_has_listener;
EXPORT_SYMBOL_GPL(nf_ctnetlink_has_listener);

const struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_hook);

/* This does not belong here, but locally generated errors need it if
 * connection tracking is in use: without this, a connection may not be
 * in the hash table, and hence manufactured ICMP or RST packets will
 * not be associated with it.
 */
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	const struct nf_ct_hook *ct_hook;

	if (skb->_nfct) {
		rcu_read_lock();
		ct_hook = rcu_dereference(nf_ct_hook);
		if (ct_hook)
			ct_hook->attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	const struct nf_ct_hook *ct_hook;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ct_hook->destroy(nfct);
	rcu_read_unlock();

	WARN_ON(!ct_hook);
}
EXPORT_SYMBOL(nf_conntrack_destroy);

void nf_ct_set_closing(struct nf_conntrack *nfct)
{
	const struct nf_ct_hook *ct_hook;

	if (!nfct)
		return;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ct_hook->set_closing(nfct);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_set_closing);

bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
			 const struct sk_buff *skb)
{
	const struct nf_ct_hook *ct_hook;
	bool ret = false;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ret = ct_hook->get_tuple_skb(dst_tuple, skb);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_ct_get_tuple_skb);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

static void __net_init
__netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
{
	int h;

	for (h = 0; h < max; h++)
		RCU_INIT_POINTER(e[h], NULL);
}

static int __net_init netfilter_net_init(struct net *net)
{
	__netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4));
	__netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6));
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	__netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp));
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	__netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge));
#endif
#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry\n");

		return -ENOMEM;
	}
#endif

	return 0;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
	remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		goto err;

	ret = netfilter_log_init();
	if (ret < 0)
		goto err_pernet;

	return 0;
err_pernet:
	unregister_pernet_subsys(&netfilter_net_ops);
err:
	return ret;
}