/* NAT for netfilter; shared with compatibility layer. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>  /* For tcp_prot in getorigdst */
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/jhash.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <linux/netfilter_ipv4/ipt_cone.h>

#ifdef HNDCTF
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <typedefs.h>
#include <osl.h>
#include <ctf/hndctf.h>

#define NFC_CTF_ENABLED	(1 << 31)
#endif /* HNDCTF */

#define DEBUGP(format, args...)

static DEFINE_RWLOCK(nf_nat_lock);

static struct nf_conntrack_l3proto *l3proto = NULL;

/* Calculated at init based on memory size */
static unsigned int nf_nat_htable_size;

static struct list_head *bysource;

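/* One slot for every possible IP protocol number (the protocol field is 8 bits wide). */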
#define MAX_IP_NAT_PROTO 256
static struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO];

static inline struct nf_nat_protocol *
__nf_nat_proto_find(u_int8_t protonum)
{
	return rcu_dereference(nf_nat_protos[protonum]);
}

struct nf_nat_protocol *
nf_nat_proto_find_get(u_int8_t protonum)
{
	struct nf_nat_protocol *p;

	rcu_read_lock();
	p = __nf_nat_proto_find(protonum);
	if (!try_module_get(p->me))
		p = &nf_nat_unknown_protocol;
	rcu_read_unlock();

	return p;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_find_get);

void
nf_nat_proto_put(struct nf_nat_protocol *p)
{
	module_put(p->me);
}
EXPORT_SYMBOL_GPL(nf_nat_proto_put);

/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct nf_conntrack_tuple *tuple)
{
	/* Original src, to ensure we map it consistently if poss. */
	return jhash_3words((__force u32)tuple->src.u3.ip, tuple->src.u.all,
			    tuple->dst.protonum, 0) % nf_nat_htable_size;
}

#ifdef HNDCTF
extern void ip_conntrack_ipct_add(struct sk_buff *skb, u_int32_t hooknum,
	struct nf_conn *ct, enum ip_conntrack_info ci,
	struct nf_conntrack_tuple *manip);
#endif /* HNDCTF */

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *conn)
{
	struct nf_conn_nat *nat;
	if (!(conn->status & IPS_NAT_DONE_MASK))
		return;

	nat = nfct_nat(conn);
	write_lock_bh(&nf_nat_lock);
	list_del(&nat->info.bysource);
	write_unlock_bh(&nf_nat_lock);

	/* Detach from cone list */
	ipt_cone_cleanup_conntrack(nat);
}

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	   incoming ones.  NAT means they don't have a fixed mapping,
	   so we invert the tuple and look for the incoming reply.

	   We could keep a separate hash if this proves too slow. */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);

	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range. */
static int
in_range(const struct nf_conntrack_tuple *tuple,
	 const struct nf_nat_range *range)
{
	struct nf_nat_protocol *proto;
	int ret = 0;

	/* If we are supposed to map IPs, then we must be in the
	   range specified, otherwise let this drag us onto a new src IP. */
	if (range->flags & IP_NAT_RANGE_MAP_IPS) {
		if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
		    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
			return 0;
	}

	rcu_read_lock();
	proto = __nf_nat_proto_find(tuple->dst.protonum);
	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	    proto->in_range(tuple, IP_NAT_MANIP_SRC,
			    &range->min, &range->max))
		ret = 1;
	rcu_read_unlock();

	return ret;
}

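/* True if the conntrack's original-direction tuple has the same protocol,
 * source address and source port/id as the given tuple. */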
static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		t->src.u3.ip == tuple->src.u3.ip &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
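/* Look in the bysource hash for an existing conntrack with the same
 * original source; if one has a mapping that fits the range, reuse its
 * source part so the client keeps a consistent external address/port. */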
static int
find_appropriate_src(const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(tuple);
	struct nf_conn_nat *nat;
	struct nf_conn *ct;

	read_lock_bh(&nf_nat_lock);
	list_for_each_entry(nat, &bysource[h], info.bysource) {
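		/* The nf_conn_nat area is embedded in the conntrack's data
		 * member here, so walk back from it to the owning nf_conn. */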
		ct = (struct nf_conn *)((char *)nat - offsetof(struct nf_conn, data));
		if (same_src(ct, tuple)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range)) {
				read_unlock_bh(&nf_nat_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&nf_nat_lock);
	return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
   1-65535, we don't do pro-rata allocation based on ports; we choose
   the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	__be32 *var_ipp;
	/* Host order */
	u_int32_t minip, maxip, j;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == IP_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3.ip;
	else
		var_ipp = &tuple->dst.u3.ip;

	/* Fast path: only one choice. */
	if (range->min_ip == range->max_ip) {
		*var_ipp = range->min_ip;
		return;
	}

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots. */
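	/* For example, with min_ip 10.0.0.1 and max_ip 10.0.0.4 there are
	 * four candidates; a given src/dst pair always hashes to the same
	 * offset j % 4 and therefore always maps to the same address. */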
	minip = ntohl(range->min_ip);
	maxip = ntohl(range->max_ip);
	j = jhash_2words((__force u32)tuple->src.u3.ip,
			 (__force u32)tuple->dst.u3.ip, 0);
	*var_ipp = htonl(minip + j % (maxip - minip + 1));
}

/* Manipulate the tuple into the range given.  For NF_IP_POST_ROUTING,
 * we change the source to map into the range.  For NF_IP_PRE_ROUTING
 * and NF_IP_LOCAL_OUT, we change the destination to map into the
 * range.  It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	struct nf_nat_protocol *proto;

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	   and that same mapping gives a unique tuple within the given
	   range, use that.

	   This is only required for source (ie. NAT/masq) mappings.
	   So far, we don't do local source mappings, so multiple
	   manips are not an issue.  */
	if (maniptype == IP_NAT_MANIP_SRC) {
		if (find_appropriate_src(orig_tuple, tuple, range)) {
			DEBUGP("get_unique_tuple: Found current src map\n");
			if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
				if (!nf_nat_used_tuple(tuple, ct))
					return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given
	   range. */
	*tuple = *orig_tuple;
	find_best_ips_proto(tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	   the range to make a unique tuple. */

	rcu_read_lock();
	proto = __nf_nat_proto_find(orig_tuple->dst.protonum);

	/* Change protocol info to have some randomization */
	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
		proto->unique_tuple(tuple, range, maniptype, ct);
		goto out;
	}

	/* Only bother mapping if it's not already in range and unique */
	if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	     proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
	    !nf_nat_used_tuple(tuple, ct))
		goto out;

	/* Last chance: get the protocol to try to obtain a unique tuple. */
	proto->unique_tuple(tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  unsigned int hooknum)
{
	struct nf_conntrack_tuple curr_tuple, new_tuple;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_info *info = &nat->info;
	int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);

	NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING ||
		     hooknum == NF_IP_POST_ROUTING ||
		     hooknum == NF_IP_LOCAL_IN ||
		     hooknum == NF_IP_LOCAL_OUT);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply. Normally
	   this is what is in the conntrack, except for prior
	   manipulations (future optimization: if num_manips == 0,
	   orig_tp =
	   conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so it will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == IP_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;
	}

	/* Place in source hash if this is the first time. */
	if (have_to_hash) {
		unsigned int srchash;

		srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		write_lock_bh(&nf_nat_lock);
		list_add(&info->bysource, &bysource[srchash]);
		write_unlock_bh(&nf_nat_lock);
	}

	/* It's done. */
	if (maniptype == IP_NAT_MANIP_DST)
		set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
	else
		set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);

/* Returns true if succeeded. */
static int
manip_pkt(u_int16_t proto,
	  struct sk_buff **pskb,
	  unsigned int iphdroff,
	  const struct nf_conntrack_tuple *target,
	  enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph;
	struct nf_nat_protocol *p;

	if (!skb_make_writable(pskb, iphdroff + sizeof(*iph)))
		return 0;

	iph = (void *)(*pskb)->data + iphdroff;

	/* Manipulate the protocol part. */

	/* rcu_read_lock()ed by nf_hook_slow */
	p = __nf_nat_proto_find(proto);
	if (!p->manip_pkt(pskb, iphdroff, target, maniptype))
		return 0;

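	/* The protocol handler may have reallocated the skb data (via
	 * skb_make_writable), so re-fetch the IP header pointer. */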
	iph = (void *)(*pskb)->data + iphdroff;

	if (maniptype == IP_NAT_MANIP_SRC) {
		nf_csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
		iph->saddr = target->src.u3.ip;
	} else {
		nf_csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
		iph->daddr = target->dst.u3.ip;
	}
	return 1;
}

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff **pskb)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;
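	/* e.g. for a connection with only IPS_SRC_NAT set, original-direction
	 * packets get their source rewritten, while replies need the matching
	 * destination rewrite; XORing with IPS_NAT_MASK selects that. */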

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
#ifdef HNDCTF
		ip_conntrack_ipct_add(*pskb, hooknum, ct, ctinfo, &target);
#endif /* HNDCTF */
		if (!manip_pkt(target.dst.protonum, pskb, 0, &target, mtype))
			return NF_DROP;
	}

	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

/* Dir is direction ICMP is coming from (opposite to packet it contains) */
int nf_nat_icmp_reply_translation(struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int hooknum,
				  struct sk_buff **pskb)
{
	struct {
		struct icmphdr icmp;
		struct iphdr ip;
	} *inside;
	struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple inner, target;
	int hdrlen = ip_hdrlen(*pskb);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);

	if (!skb_make_writable(pskb, hdrlen + sizeof(*inside)))
		return 0;

	inside = (void *)(*pskb)->data + ip_hdrlen(*pskb);

	/* We're actually going to mangle it beyond trivial checksum
	   adjustment, so make sure the current checksum is correct. */
	if (nf_ip_checksum(*pskb, hooknum, hdrlen, 0))
		return 0;

	/* Must be RELATED */
	NF_CT_ASSERT((*pskb)->nfctinfo == IP_CT_RELATED ||
		     (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);

	/* Redirects on non-null nats must be dropped, else they'll
	   start talking to each other without our translation, and be
	   confused... --RR */
	if (inside->icmp.type == ICMP_REDIRECT) {
		/* If NAT isn't finished, assume it and drop. */
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;

		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	DEBUGP("icmp_reply_translation: translating error %p manip %u dir %s\n",
	       *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");

	/* rcu_read_lock()ed by nf_hook_slow */
	l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);

	if (!nf_ct_get_tuple(*pskb,
			     ip_hdrlen(*pskb) + sizeof(struct icmphdr),
			     (ip_hdrlen(*pskb) +
			      sizeof(struct icmphdr) + inside->ip.ihl * 4),
			     (u_int16_t)AF_INET,
			     inside->ip.protocol,
			     &inner, l3proto, l4proto))
		return 0;

	/* Change inner back to look like incoming packet.  We do the
	   opposite manip on this hook to normal, because it might not
	   pass all hooks (locally-generated ICMP).  Consider incoming
	   packet: PREROUTING (DST manip), routing produces ICMP, goes
	   through POSTROUTING (which must correct the DST manip). */
	if (!manip_pkt(inside->ip.protocol, pskb,
		       ip_hdrlen(*pskb) + sizeof(inside->icmp),
		       &ct->tuplehash[!dir].tuple,
		       !manip))
		return 0;

	if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
		/* Reload "inside": manip_pkt may have reallocated the skb data. */
		inside = (void *)(*pskb)->data + ip_hdrlen(*pskb);
		inside->icmp.checksum = 0;
		inside->icmp.checksum =
			csum_fold(skb_checksum(*pskb, hdrlen,
					       (*pskb)->len - hdrlen, 0));
	}

	/* Change outer to look like the reply to an incoming packet
	 * (proto 0 means don't invert per-proto part). */
	if (manip == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
		if (!manip_pkt(0, pskb, 0, &target, manip))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);

/* Protocol registration. */
int nf_nat_protocol_register(struct nf_nat_protocol *proto)
{
	int ret = 0;

	write_lock_bh(&nf_nat_lock);
	if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
		ret = -EBUSY;
		goto out;
	}
	rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
 out:
	write_unlock_bh(&nf_nat_lock);
	return ret;
}
EXPORT_SYMBOL(nf_nat_protocol_register);

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_protocol_unregister(struct nf_nat_protocol *proto)
{
	write_lock_bh(&nf_nat_lock);
	rcu_assign_pointer(nf_nat_protos[proto->protonum],
			   &nf_nat_unknown_protocol);
	write_unlock_bh(&nf_nat_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
int
nf_nat_port_range_to_nfattr(struct sk_buff *skb,
			    const struct nf_nat_range *range)
{
	NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16),
		&range->min.tcp.port);
	NFA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(__be16),
		&range->max.tcp.port);

	return 0;

nfattr_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nfattr);

int
nf_nat_port_nfattr_to_range(struct nfattr *tb[], struct nf_nat_range *range)
{
	int ret = 0;

	/* we have to return whether we actually parsed something or not */

	if (tb[CTA_PROTONAT_PORT_MIN-1]) {
		ret = 1;
		range->min.tcp.port =
			*(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]);
	}

	if (!tb[CTA_PROTONAT_PORT_MAX-1]) {
		if (ret)
			range->max.tcp.port = range->min.tcp.port;
	} else {
		ret = 1;
		range->max.tcp.port =
			*(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_port_nfattr_to_range);
#endif

static int __init nf_nat_init(void)
{
	size_t i;

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;

	/* One vmalloc for the bysource hash table */
	bysource = vmalloc(sizeof(struct list_head) * nf_nat_htable_size);
	if (!bysource)
		return -ENOMEM;

	/* Sew in builtin protocols. */
	write_lock_bh(&nf_nat_lock);
	for (i = 0; i < MAX_IP_NAT_PROTO; i++)
		rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
	write_unlock_bh(&nf_nat_lock);

	for (i = 0; i < nf_nat_htable_size; i++) {
		INIT_LIST_HEAD(&bysource[i]);
	}

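	/* Hook NAT cleanup into conntrack destruction so the bysource entry
	 * (and any cone binding) is removed when a conntrack dies. */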
	NF_CT_ASSERT(rcu_dereference(nf_conntrack_destroyed) == NULL);
	rcu_assign_pointer(nf_conntrack_destroyed, nf_nat_cleanup_conntrack);

	/* Initialize fake conntrack so that NAT will skip it */
	nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;

	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
	return 0;
}

/* Clear NAT section of all conntracks, in case we're loaded again. */
static int clean_nat(struct nf_conn *i, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;
	memset(nat, 0, sizeof(*nat));
	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
	return 0;
}

static void __exit nf_nat_cleanup(void)
{
	nf_ct_iterate_cleanup(&clean_nat, NULL);
	rcu_assign_pointer(nf_conntrack_destroyed, NULL);
	synchronize_rcu();
	vfree(bysource);
	nf_ct_l3proto_put(l3proto);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);