// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2003,2004 USAGI/WIDE Project
 *
 * Authors	Mitsuru KANDA  <mk@linux-ipv6.org>
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 * Based on net/ipv4/xfrm4_tunnel.c
 */
#include <linux/module.h>
#include <linux/xfrm.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/mutex.h>
#include <net/netns/generic.h>

#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256

#define XFRM6_TUNNEL_SPI_MIN	1
#define XFRM6_TUNNEL_SPI_MAX	0xffffffff

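/*
 * Per-netns state: hash tables mapping tunnel source addresses to SPIs and
 * back, plus the cursor used when allocating the next SPI.
 */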
struct xfrm6_tunnel_net {
	struct hlist_head spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
	struct hlist_head spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
	u32 spi;
};

static unsigned int xfrm6_tunnel_net_id __read_mostly;
static inline struct xfrm6_tunnel_net *xfrm6_tunnel_pernet(struct net *net)
{
	return net_generic(net, xfrm6_tunnel_net_id);
}

/*
 * xfrm6_tunnel_spi entries allocate a unique id ("spi") per
 * xfrm_address_t (the tunnel source address).
 */
struct xfrm6_tunnel_spi {
	struct hlist_node	list_byaddr;
	struct hlist_node	list_byspi;
	xfrm_address_t		addr;
	u32			spi;
	refcount_t		refcnt;
	struct rcu_head		rcu_head;
};

static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);

static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;

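/* Fold the 32-bit address hash down to a by-address bucket index. */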
static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
{
	unsigned int h;

	h = ipv6_addr_hash((const struct in6_addr *)addr);
	h ^= h >> 16;
	h ^= h >> 8;
	h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;

	return h;
}

static inline unsigned int xfrm6_tunnel_spi_hash_byspi(u32 spi)
{
	return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}

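/*
 * Find the entry for @saddr in the by-address hash table.  Callers must hold
 * either rcu_read_lock() or xfrm6_tunnel_spi_lock.
 */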
static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	struct xfrm6_tunnel_spi *x6spi;

	hlist_for_each_entry_rcu(x6spi,
			     &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
			     list_byaddr, lockdep_is_held(&xfrm6_tunnel_spi_lock)) {
		if (xfrm6_addr_equal(&x6spi->addr, saddr))
			return x6spi;
	}

	return NULL;
}

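/* Return the SPI allocated for @saddr in network byte order, or 0 if none. */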
__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	rcu_read_lock_bh();
	x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
	spi = x6spi ? x6spi->spi : 0;
	rcu_read_unlock_bh();
	return htonl(spi);
}
EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);

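/* Return the by-spi bucket index if @spi is still free, or -1 if it is taken. */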
static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	struct xfrm6_tunnel_spi *x6spi;
	int index = xfrm6_tunnel_spi_hash_byspi(spi);

	hlist_for_each_entry(x6spi,
			     &xfrm6_tn->spi_byspi[index],
			     list_byspi) {
		if (x6spi->spi == spi)
			return -1;
	}
	return index;
}

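/*
 * Allocate a fresh SPI for @saddr and link the new entry into both hash
 * tables.  The search starts at the per-netns cursor and wraps around
 * [XFRM6_TUNNEL_SPI_MIN, XFRM6_TUNNEL_SPI_MAX].  Returns 0 if the SPI space
 * is exhausted or the cache allocation fails.  Called with
 * xfrm6_tunnel_spi_lock held.
 */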
static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	u32 spi;
	struct xfrm6_tunnel_spi *x6spi;
	int index;

	if (xfrm6_tn->spi < XFRM6_TUNNEL_SPI_MIN ||
	    xfrm6_tn->spi >= XFRM6_TUNNEL_SPI_MAX)
		xfrm6_tn->spi = XFRM6_TUNNEL_SPI_MIN;
	else
		xfrm6_tn->spi++;

	for (spi = xfrm6_tn->spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
		index = __xfrm6_tunnel_spi_check(net, spi);
		if (index >= 0)
			goto alloc_spi;

		if (spi == XFRM6_TUNNEL_SPI_MAX)
			break;
	}
	for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
		index = __xfrm6_tunnel_spi_check(net, spi);
		if (index >= 0)
			goto alloc_spi;
	}
	spi = 0;
	goto out;
alloc_spi:
	xfrm6_tn->spi = spi;
	x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
	if (!x6spi)
		goto out;

	memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
	x6spi->spi = spi;
	refcount_set(&x6spi->refcnt, 1);

	hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tn->spi_byspi[index]);

	index = xfrm6_tunnel_spi_hash_byaddr(saddr);
	hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tn->spi_byaddr[index]);
out:
	return spi;
}

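/*
 * Take another reference on the SPI already allocated for @saddr, or
 * allocate a new one.  Returns the SPI in network byte order, or 0 on
 * failure.
 */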
__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	spin_lock_bh(&xfrm6_tunnel_spi_lock);
	x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
	if (x6spi) {
		refcount_inc(&x6spi->refcnt);
		spi = x6spi->spi;
	} else
		spi = __xfrm6_tunnel_alloc_spi(net, saddr);
	spin_unlock_bh(&xfrm6_tunnel_spi_lock);

	return htonl(spi);
}
EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);

static void x6spi_destroy_rcu(struct rcu_head *head)
{
	kmem_cache_free(xfrm6_tunnel_spi_kmem,
			container_of(head, struct xfrm6_tunnel_spi, rcu_head));
}

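/*
 * Drop one reference on the entry for @saddr.  When the last reference goes
 * away, unlink the entry from both hash tables and free it after an RCU
 * grace period.
 */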
static void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *n;

	spin_lock_bh(&xfrm6_tunnel_spi_lock);

	hlist_for_each_entry_safe(x6spi, n,
				  &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
				  list_byaddr)
	{
		if (xfrm6_addr_equal(&x6spi->addr, saddr)) {
			if (refcount_dec_and_test(&x6spi->refcnt)) {
				hlist_del_rcu(&x6spi->list_byaddr);
				hlist_del_rcu(&x6spi->list_byspi);
				call_rcu(&x6spi->rcu_head, x6spi_destroy_rcu);
				break;
			}
		}
	}
	spin_unlock_bh(&xfrm6_tunnel_spi_lock);
}

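/*
 * The outer IPv6 header has already been built by the tunnel-mode output
 * path; just rewind skb->data to the network header.
 */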
static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}

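/*
 * Report the next header value recorded at IP6CB(skb)->nhoff back to the
 * xfrm input path.
 */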
static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
	return skb_network_header(skb)[IP6CB(skb)->nhoff];
}

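/*
 * Map the outer IPv6 source address back to its SPI and hand the packet to
 * the common xfrm receive path.
 */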
static int xfrm6_tunnel_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	__be32 spi;

	spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL);
}

static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			    u8 type, u8 code, int offset, __be32 info)
{
	/* xfrm6_tunnel native err handling */
	switch (type) {
	case ICMPV6_DEST_UNREACH:
		switch (code) {
		case ICMPV6_NOROUTE:
		case ICMPV6_ADM_PROHIBITED:
		case ICMPV6_NOT_NEIGHBOUR:
		case ICMPV6_ADDR_UNREACH:
		case ICMPV6_PORT_UNREACH:
		default:
			break;
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		break;
	case ICMPV6_TIME_EXCEED:
		switch (code) {
		case ICMPV6_EXC_HOPLIMIT:
			break;
		case ICMPV6_EXC_FRAGTIME:
		default:
			break;
		}
		break;
	case ICMPV6_PARAMPROB:
		switch (code) {
		case ICMPV6_HDR_FIELD:
			break;
		case ICMPV6_UNK_NEXTHDR:
			break;
		case ICMPV6_UNK_OPTION:
			break;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int xfrm6_tunnel_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	if (x->props.mode != XFRM_MODE_TUNNEL) {
		NL_SET_ERR_MSG(extack, "IPv6 tunnel can only be used with tunnel mode");
		return -EINVAL;
	}

	if (x->encap) {
		NL_SET_ERR_MSG(extack, "IPv6 tunnel is not compatible with encapsulation");
		return -EINVAL;
	}

	x->props.header_len = sizeof(struct ipv6hdr);

	return 0;
}

static void xfrm6_tunnel_destroy(struct xfrm_state *x)
{
	struct net *net = xs_net(x);

	xfrm6_tunnel_free_spi(net, (xfrm_address_t *)&x->props.saddr);
}

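/* xfrm type for IPv6-in-IPv6 (IPPROTO_IPV6) tunnel states. */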
static const struct xfrm_type xfrm6_tunnel_type = {
	.owner          = THIS_MODULE,
	.proto		= IPPROTO_IPV6,
	.init_state	= xfrm6_tunnel_init_state,
	.destructor	= xfrm6_tunnel_destroy,
	.input		= xfrm6_tunnel_input,
	.output		= xfrm6_tunnel_output,
};

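/*
 * Tunnel handlers for IPv6-in-IPv6 (registered for AF_INET6) and
 * IPv6-in-IPv4 (registered for AF_INET) reception.
 */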
static struct xfrm6_tunnel xfrm6_tunnel_handler __read_mostly = {
	.handler	= xfrm6_tunnel_rcv,
	.err_handler	= xfrm6_tunnel_err,
	.priority	= 3,
};

static struct xfrm6_tunnel xfrm46_tunnel_handler __read_mostly = {
	.handler	= xfrm6_tunnel_rcv,
	.err_handler	= xfrm6_tunnel_err,
	.priority	= 2,
};

static int __net_init xfrm6_tunnel_net_init(struct net *net)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	unsigned int i;

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tn->spi_byaddr[i]);
	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tn->spi_byspi[i]);
	xfrm6_tn->spi = 0;

	return 0;
}

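/*
 * Flush remaining states (and pending state GC work) before checking that
 * both per-netns hash tables are empty.
 */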
static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
{
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	unsigned int i;

	xfrm_flush_gc();
	xfrm_state_flush(net, 0, false, true);

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
		WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
		WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byspi[i]));
}

static struct pernet_operations xfrm6_tunnel_net_ops = {
	.init	= xfrm6_tunnel_net_init,
	.exit	= xfrm6_tunnel_net_exit,
	.id	= &xfrm6_tunnel_net_id,
	.size	= sizeof(struct xfrm6_tunnel_net),
};

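/*
 * Create the SPI slab cache and register the per-netns ops, the xfrm type
 * and both tunnel handlers; unwind in reverse order on failure.
 */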
static int __init xfrm6_tunnel_init(void)
{
	int rv;

	xfrm6_tunnel_spi_kmem = KMEM_CACHE(xfrm6_tunnel_spi, SLAB_HWCACHE_ALIGN);
	if (!xfrm6_tunnel_spi_kmem)
		return -ENOMEM;
	rv = register_pernet_subsys(&xfrm6_tunnel_net_ops);
	if (rv < 0)
		goto out_pernet;
	rv = xfrm_register_type(&xfrm6_tunnel_type, AF_INET6);
	if (rv < 0)
		goto out_type;
	rv = xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6);
	if (rv < 0)
		goto out_xfrm6;
	rv = xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET);
	if (rv < 0)
		goto out_xfrm46;
	return 0;

out_xfrm46:
	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
out_xfrm6:
	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
out_type:
	unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
out_pernet:
	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
	return rv;
}

static void __exit xfrm6_tunnel_fini(void)
{
	xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
	unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
	/* Readers may still hold references to xfrm6_tunnel_spi entries,
	 * so wait for all outstanding RCU callbacks to finish before
	 * destroying the slab cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
}

module_init(xfrm6_tunnel_init);
module_exit(xfrm6_tunnel_fini);
MODULE_DESCRIPTION("IPv6 XFRM tunnel driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_IPV6);