// SPDX-License-Identifier: GPL-2.0
#include <linux/jhash.h>
#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/vmalloc.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <uapi/linux/genetlink.h>
#include "ila.h"

struct ila_xlat_params {
	struct ila_params ip;
	int ifindex;
};

struct ila_map {
	struct ila_xlat_params xp;
	struct rhash_head node;
	struct ila_map __rcu *next;
	struct rcu_head rcu;
};

#define MAX_LOCKS 1024
#define LOCKS_PER_CPU 10

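/* Allocate the per-namespace array of bucket spinlocks used to serialize
 * modifications to the sublists hanging off rhashtable entries.
 */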
static int alloc_ila_locks(struct ila_net *ilan)
{
	return alloc_bucket_spinlocks(&ilan->xlat.locks, &ilan->xlat.locks_mask,
				      MAX_LOCKS, LOCKS_PER_CPU,
				      GFP_KERNEL);
}

static u32 hashrnd __read_mostly;
static __always_inline void __ila_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static inline u32 ila_locator_hash(struct ila_locator loc)
{
	u32 *v = (u32 *)loc.v32;

	__ila_hash_secret_init();
	return jhash_2words(v[0], v[1], hashrnd);
}

static inline spinlock_t *ila_get_lock(struct ila_net *ilan,
				       struct ila_locator loc)
{
	return &ilan->xlat.locks[ila_locator_hash(loc) & ilan->xlat.locks_mask];
}

static inline int ila_cmp_wildcards(struct ila_map *ila,
				    struct ila_addr *iaddr, int ifindex)
{
	return (ila->xp.ifindex && ila->xp.ifindex != ifindex);
}

static inline int ila_cmp_params(struct ila_map *ila,
				 struct ila_xlat_params *xp)
{
	return (ila->xp.ifindex != xp->ifindex);
}

static int ila_cmpfn(struct rhashtable_compare_arg *arg,
		     const void *obj)
{
	const struct ila_map *ila = obj;

	return (ila->xp.ip.locator_match.v64 != *(__be64 *)arg->key);
}

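/* Ordering score for entries that share a locator: mappings bound to a
 * specific ifindex score higher and are kept ahead of wildcard mappings,
 * so the most specific match is found first.
 */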
static inline int ila_order(struct ila_map *ila)
{
	int score = 0;

	if (ila->xp.ifindex)
		score += 1 << 1;

	return score;
}

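/* The rhashtable is keyed on the 64-bit locator to match. Entries that
 * share a locator are chained through ila_map.next behind a single
 * rhashtable node.
 */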
static const struct rhashtable_params rht_params = {
	.nelem_hint = 1024,
	.head_offset = offsetof(struct ila_map, node),
	.key_offset = offsetof(struct ila_map, xp.ip.locator_match),
	.key_len = sizeof(u64), /* locator */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
	.obj_cmpfn = ila_cmpfn,
};

static int parse_nl_config(struct genl_info *info,
			   struct ila_xlat_params *xp)
{
	memset(xp, 0, sizeof(*xp));

	if (info->attrs[ILA_ATTR_LOCATOR])
		xp->ip.locator.v64 = (__force __be64)nla_get_u64(
			info->attrs[ILA_ATTR_LOCATOR]);

	if (info->attrs[ILA_ATTR_LOCATOR_MATCH])
		xp->ip.locator_match.v64 = (__force __be64)nla_get_u64(
			info->attrs[ILA_ATTR_LOCATOR_MATCH]);

	if (info->attrs[ILA_ATTR_CSUM_MODE])
		xp->ip.csum_mode = nla_get_u8(info->attrs[ILA_ATTR_CSUM_MODE]);
	else
		xp->ip.csum_mode = ILA_CSUM_NO_ACTION;

	if (info->attrs[ILA_ATTR_IDENT_TYPE])
		xp->ip.ident_type = nla_get_u8(
				info->attrs[ILA_ATTR_IDENT_TYPE]);
	else
		xp->ip.ident_type = ILA_ATYPE_USE_FORMAT;

	if (info->attrs[ILA_ATTR_IFINDEX])
		xp->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]);

	return 0;
}

/* Must be called with the RCU read lock held */
static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr,
						   int ifindex,
						   struct ila_net *ilan)
{
	struct ila_map *ila;

	ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &iaddr->loc,
				     rht_params);
	while (ila) {
		if (!ila_cmp_wildcards(ila, iaddr, ifindex))
			return ila;
		ila = rcu_access_pointer(ila->next);
	}

	return NULL;
}

/* Must be called with the RCU read lock held */
static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp,
						   struct ila_net *ilan)
{
	struct ila_map *ila;

	ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				     &xp->ip.locator_match,
				     rht_params);
	while (ila) {
		if (!ila_cmp_params(ila, xp))
			return ila;
		ila = rcu_access_pointer(ila->next);
	}

	return NULL;
}

static inline void ila_release(struct ila_map *ila)
{
	kfree_rcu(ila, rcu);
}

static void ila_free_node(struct ila_map *ila)
{
	struct ila_map *next;

	/* Assumes the RCU read lock is held */
	while (ila) {
		next = rcu_access_pointer(ila->next);
		ila_release(ila);
		ila = next;
	}
}

static void ila_free_cb(void *ptr, void *arg)
{
	ila_free_node((struct ila_map *)ptr);
}

static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila);

static unsigned int
ila_nf_input(void *priv,
	     struct sk_buff *skb,
	     const struct nf_hook_state *state)
{
	ila_xlat_addr(skb, false);
	return NF_ACCEPT;
}

static const struct nf_hook_ops ila_nf_hook_ops[] = {
	{
		.hook = ila_nf_input,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = -1,
	},
};

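/* Add a mapping for a namespace. The first mapping also registers the
 * netfilter hooks. If an entry already exists for the same locator, the
 * new entry is inserted into that entry's sublist, ordered by ila_order();
 * an exact duplicate returns -EEXIST.
 */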
static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_map *ila, *head;
	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
	int err = 0, order;

	if (!ilan->xlat.hooks_registered) {
		/* We defer registering net hooks in the namespace until the
		 * first mapping is added.
		 */
		err = nf_register_net_hooks(net, ila_nf_hook_ops,
					    ARRAY_SIZE(ila_nf_hook_ops));
		if (err)
			return err;

		ilan->xlat.hooks_registered = true;
	}

	ila = kzalloc(sizeof(*ila), GFP_KERNEL);
	if (!ila)
		return -ENOMEM;

	ila_init_saved_csum(&xp->ip);

	ila->xp = *xp;

	order = ila_order(ila);

	spin_lock(lock);

	head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				      &xp->ip.locator_match,
				      rht_params);
	if (!head) {
		/* New entry for the rhash_table */
		err = rhashtable_lookup_insert_fast(&ilan->xlat.rhash_table,
						    &ila->node, rht_params);
	} else {
		struct ila_map *tila = head, *prev = NULL;

		do {
			if (!ila_cmp_params(tila, xp)) {
				err = -EEXIST;
				goto out;
			}

			if (order > ila_order(tila))
				break;

			prev = tila;
			tila = rcu_dereference_protected(tila->next,
				lockdep_is_held(lock));
		} while (tila);

		if (prev) {
			/* Insert in sub list of head */
			RCU_INIT_POINTER(ila->next, tila);
			rcu_assign_pointer(prev->next, ila);
		} else {
			/* Make this ila new head */
			RCU_INIT_POINTER(ila->next, head);
			err = rhashtable_replace_fast(&ilan->xlat.rhash_table,
						      &head->node,
						      &ila->node, rht_params);
			if (err)
				goto out;
		}
	}

out:
	spin_unlock(lock);

	if (err)
		kfree(ila);

	return err;
}

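/* Remove the mapping that matches xp. If the entry being removed is the
 * rhashtable head and its sublist is non-empty, the next entry in the
 * sublist becomes the new head; otherwise the node is removed from the
 * table. Returns -ENOENT if no matching mapping exists.
 */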
static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_map *ila, *head, *prev;
	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
	int err = -ENOENT;

	spin_lock(lock);

	head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				      &xp->ip.locator_match, rht_params);
	ila = head;

	prev = NULL;

	while (ila) {
		if (ila_cmp_params(ila, xp)) {
			prev = ila;
			ila = rcu_dereference_protected(ila->next,
							lockdep_is_held(lock));
			continue;
		}

		err = 0;

		if (prev) {
			/* Not head, just delete from list */
			rcu_assign_pointer(prev->next, ila->next);
		} else {
			/* It is the head. If there is something in the
			 * sublist we need to make a new head.
			 */
			head = rcu_dereference_protected(ila->next,
							 lockdep_is_held(lock));
			if (head) {
				/* Put first entry in the sublist into the
				 * table
				 */
				err = rhashtable_replace_fast(
					&ilan->xlat.rhash_table, &ila->node,
					&head->node, rht_params);
				if (err)
					goto out;
			} else {
				/* Entry no longer used */
				err = rhashtable_remove_fast(
						&ilan->xlat.rhash_table,
						&ila->node, rht_params);
			}
		}

		ila_release(ila);

		break;
	}

out:
	spin_unlock(lock);

	return err;
}

int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_xlat_params p;
	int err;

	err = parse_nl_config(info, &p);
	if (err)
		return err;

	return ila_add_mapping(net, &p);
}

int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_xlat_params xp;
	int err;

	err = parse_nl_config(info, &xp);
	if (err)
		return err;

	ila_del_mapping(net, &xp);

	return 0;
}

static inline spinlock_t *lock_from_ila_map(struct ila_net *ilan,
					    struct ila_map *ila)
{
	return ila_get_lock(ilan, ila->xp.ip.locator_match);
}

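/* Flush all mappings in the namespace: walk the rhashtable, remove each
 * head node under its bucket lock, and free its entire sublist.
 */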
int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct rhashtable_iter iter;
	struct ila_map *ila;
	spinlock_t *lock;
	int ret = 0;

	rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter);
	rhashtable_walk_start(&iter);

	for (;;) {
		ila = rhashtable_walk_next(&iter);

		if (IS_ERR(ila)) {
			if (PTR_ERR(ila) == -EAGAIN)
				continue;
			ret = PTR_ERR(ila);
			goto done;
		} else if (!ila) {
			break;
		}

		lock = lock_from_ila_map(ilan, ila);

		spin_lock(lock);

		ret = rhashtable_remove_fast(&ilan->xlat.rhash_table,
					     &ila->node, rht_params);
		if (!ret)
			ila_free_node(ila);

		spin_unlock(lock);

		if (ret)
			break;
	}

done:
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
	return ret;
}

static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg)
{
	if (nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR,
			      (__force u64)ila->xp.ip.locator.v64,
			      ILA_ATTR_PAD) ||
	    nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH,
			      (__force u64)ila->xp.ip.locator_match.v64,
			      ILA_ATTR_PAD) ||
	    nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->xp.ifindex) ||
	    nla_put_u8(msg, ILA_ATTR_CSUM_MODE, ila->xp.ip.csum_mode) ||
	    nla_put_u8(msg, ILA_ATTR_IDENT_TYPE, ila->xp.ip.ident_type))
		return -1;

	return 0;
}

static int ila_dump_info(struct ila_map *ila,
			 u32 portid, u32 seq, u32 flags,
			 struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &ila_nl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (ila_fill_info(ila, skb) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct sk_buff *msg;
	struct ila_xlat_params xp;
	struct ila_map *ila;
	int ret;

	ret = parse_nl_config(info, &xp);
	if (ret)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rcu_read_lock();

	ret = -ESRCH;
	ila = ila_lookup_by_params(&xp, ilan);
	if (ila) {
		ret = ila_dump_info(ila,
				    info->snd_portid,
				    info->snd_seq, 0, msg,
				    info->genlhdr->cmd);
	}

	rcu_read_unlock();

	if (ret < 0)
		goto out_free;

	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
	return ret;
}

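/* Dump state kept across netlink dump callbacks; a pointer to it is
 * stashed in cb->args[0]. skip counts entries of the current sublist
 * that have already been emitted.
 */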
struct ila_dump_iter {
	struct rhashtable_iter rhiter;
	int skip;
};

int ila_xlat_nl_dump_start(struct netlink_callback *cb)
{
	struct net *net = sock_net(cb->skb->sk);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_dump_iter *iter;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter->rhiter);

	iter->skip = 0;
	cb->args[0] = (long)iter;

	return 0;
}

int ila_xlat_nl_dump_done(struct netlink_callback *cb)
{
	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];

	rhashtable_walk_exit(&iter->rhiter);

	kfree(iter);

	return 0;
}

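/* Resumable dump: skip entries of the current sublist that were already
 * written on a previous pass, then emit one message per mapping until
 * the skb fills or the walk completes.
 */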
int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
	struct rhashtable_iter *rhiter = &iter->rhiter;
	int skip = iter->skip;
	struct ila_map *ila;
	int ret;

	rhashtable_walk_start(rhiter);

	/* Get first entry */
	ila = rhashtable_walk_peek(rhiter);

	if (ila && !IS_ERR(ila) && skip) {
		/* Skip over visited entries */

		while (ila && skip) {
			/* Skip over any ila entries in this list that we
			 * have already dumped.
			 */
			ila = rcu_access_pointer(ila->next);
			skip--;
		}
	}

	skip = 0;

	for (;;) {
		if (IS_ERR(ila)) {
			ret = PTR_ERR(ila);
			if (ret == -EAGAIN) {
				/* The table has changed and the iterator has
				 * been reset. Return -EAGAIN to the
				 * application even if we have already written
				 * data to the skb; the application needs to
				 * deal with this.
				 */

				goto out_ret;
			} else {
				break;
			}
		} else if (!ila) {
			ret = 0;
			break;
		}

		while (ila) {
			ret = ila_dump_info(ila, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq, NLM_F_MULTI,
					    skb, ILA_CMD_GET);
			if (ret)
				goto out;

			skip++;
			ila = rcu_access_pointer(ila->next);
		}

		skip = 0;
		ila = rhashtable_walk_next(rhiter);
	}

out:
	iter->skip = skip;
	ret = (skb->len ? : ret);

out_ret:
	rhashtable_walk_stop(rhiter);
	return ret;
}

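/* Per-namespace init: allocate the bucket spinlocks and the rhashtable
 * that back the mapping table.
 */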
int ila_xlat_init_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	int err;

	err = alloc_ila_locks(ilan);
	if (err)
		return err;

	err = rhashtable_init(&ilan->xlat.rhash_table, &rht_params);
	if (err) {
		free_bucket_spinlocks(ilan->xlat.locks);
		return err;
	}

	return 0;
}

void ila_xlat_exit_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);

	rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);

	free_bucket_spinlocks(ilan->xlat.locks);

	if (ilan->xlat.hooks_registered)
		nf_unregister_net_hooks(net, ila_nf_hook_ops,
					ARRAY_SIZE(ila_nf_hook_ops));
}

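/* Called from the NF_INET_PRE_ROUTING hook (via ila_nf_input): look up the
 * destination address in the mapping table and, on a match, rewrite its
 * locator according to the mapping's checksum mode.
 */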
static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
{
	struct ila_map *ila;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);

	/* Assumes the skb contains a valid IPv6 header that has already been
	 * pulled.
	 */

	/* No check here that the ILA type in the mapping matches what is in
	 * the address. We assume that whatever the sender gave us can be
	 * translated. The checksum mode, however, is relevant.
	 */
	rcu_read_lock();

	ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, ilan);
	if (ila)
		ila_update_ipv6_locator(skb, &ila->xp.ip, sir2ila);

	rcu_read_unlock();

	return 0;
}