// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/netrom.h>
#include <linux/seq_file.h>
#include <linux/export.h>

static unsigned int nr_neigh_no = 1;

static HLIST_HEAD(nr_node_list);
static DEFINE_SPINLOCK(nr_node_list_lock);
static HLIST_HEAD(nr_neigh_list);
static DEFINE_SPINLOCK(nr_neigh_list_lock);
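/*
 * All known destinations (nodes) and directly reachable stations
 * (neighbours) live on the two global lists above, each protected by its
 * own BH-safe spinlock; individual entries are reference counted through
 * nr_node_hold()/nr_node_put() and nr_neigh_hold()/nr_neigh_put().
 */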

static struct nr_node *nr_node_get(ax25_address *callsign)
{
	struct nr_node *found = NULL;
	struct nr_node *nr_node;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list)
		if (ax25cmp(callsign, &nr_node->callsign) == 0) {
			nr_node_hold(nr_node);
			found = nr_node;
			break;
		}
	spin_unlock_bh(&nr_node_list_lock);
	return found;
}

static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
					 struct net_device *dev)
{
	struct nr_neigh *found = NULL;
	struct nr_neigh *nr_neigh;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(nr_neigh, &nr_neigh_list)
		if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
		    nr_neigh->dev == dev) {
			nr_neigh_hold(nr_neigh);
			found = nr_neigh;
			break;
		}
	spin_unlock_bh(&nr_neigh_list_lock);
	return found;
}

static void nr_remove_neigh(struct nr_neigh *);

/* re-sort the routes in quality order. */
static void re_sort_routes(struct nr_node *nr_node, int x, int y)
{
	if (nr_node->routes[y].quality > nr_node->routes[x].quality) {
		if (nr_node->which == x)
			nr_node->which = y;
		else if (nr_node->which == y)
			nr_node->which = x;

		swap(nr_node->routes[x], nr_node->routes[y]);
	}
}
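/*
 * Worked example (arbitrary values): for a node whose route qualities are
 * currently { 10, 200, 150 }, nr_add_node() below calls
 * re_sort_routes(node, 0, 1), then (1, 2), then (0, 1) again, yielding
 * { 200, 150, 10 }.  The "which" index is swapped along with the entries,
 * so it keeps referring to the same route after the sort.
 */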

/*
 *	Add a new route to a node, and in the process add the node and the
 *	neighbour if they are new.
 */
static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
	ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
	int quality, int obs_count)
{
	struct nr_node  *nr_node;
	struct nr_neigh *nr_neigh;
	int i, found;
	struct net_device *odev;

	if ((odev = nr_dev_get(nr)) != NULL) {	/* Can't add routes to ourself */
		dev_put(odev);
		return -EINVAL;
	}

	nr_node = nr_node_get(nr);

	nr_neigh = nr_neigh_get_dev(ax25, dev);

	/*
	 * The L2 link to a neighbour has failed in the past
	 * and now a frame comes from this neighbour. We assume
	 * it was a temporary problem with the link and reset the
	 * routes now (and not wait for a node broadcast).
	 */
	if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
		struct nr_node *nr_nodet;

		spin_lock_bh(&nr_node_list_lock);
		nr_node_for_each(nr_nodet, &nr_node_list) {
			nr_node_lock(nr_nodet);
			for (i = 0; i < nr_nodet->count; i++)
				if (nr_nodet->routes[i].neighbour == nr_neigh)
					if (i < nr_nodet->which)
						nr_nodet->which = i;
			nr_node_unlock(nr_nodet);
		}
		spin_unlock_bh(&nr_node_list_lock);
	}

	if (nr_neigh != NULL)
		nr_neigh->failed = 0;

	if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
		nr_neigh_put(nr_neigh);
		nr_node_put(nr_node);
		return 0;
	}

	if (nr_neigh == NULL) {
		if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
			if (nr_node)
				nr_node_put(nr_node);
			return -ENOMEM;
		}

		nr_neigh->callsign = *ax25;
		nr_neigh->digipeat = NULL;
		nr_neigh->ax25     = NULL;
		nr_neigh->dev      = dev;
		nr_neigh->quality  = READ_ONCE(sysctl_netrom_default_path_quality);
		nr_neigh->locked   = 0;
		nr_neigh->count    = 0;
		nr_neigh->number   = nr_neigh_no++;
		nr_neigh->failed   = 0;
		refcount_set(&nr_neigh->refcount, 1);

		if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
			nr_neigh->digipeat = kmemdup(ax25_digi,
						     sizeof(*ax25_digi),
						     GFP_ATOMIC);
			if (nr_neigh->digipeat == NULL) {
				kfree(nr_neigh);
				if (nr_node)
					nr_node_put(nr_node);
				return -ENOMEM;
			}
		}

		spin_lock_bh(&nr_neigh_list_lock);
		hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
		nr_neigh_hold(nr_neigh);
		spin_unlock_bh(&nr_neigh_list_lock);
	}

	if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
		nr_neigh->quality = quality;

	if (nr_node == NULL) {
		if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
			if (nr_neigh)
				nr_neigh_put(nr_neigh);
			return -ENOMEM;
		}

		nr_node->callsign = *nr;
		strcpy(nr_node->mnemonic, mnemonic);

		nr_node->which = 0;
		nr_node->count = 1;
		refcount_set(&nr_node->refcount, 1);
		spin_lock_init(&nr_node->node_lock);

		nr_node->routes[0].quality   = quality;
		nr_node->routes[0].obs_count = obs_count;
		nr_node->routes[0].neighbour = nr_neigh;

		nr_neigh_hold(nr_neigh);
		nr_neigh->count++;

		spin_lock_bh(&nr_node_list_lock);
		hlist_add_head(&nr_node->node_node, &nr_node_list);
		/* refcount initialized at 1 */
		spin_unlock_bh(&nr_node_list_lock);

		nr_neigh_put(nr_neigh);
		return 0;
	}
	nr_node_lock(nr_node);

	if (quality != 0)
		strcpy(nr_node->mnemonic, mnemonic);

	for (found = 0, i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_node->routes[i].quality   = quality;
			nr_node->routes[i].obs_count = obs_count;
			found = 1;
			break;
		}
	}

	if (!found) {
		/* We have space at the bottom, slot it in */
		if (nr_node->count < 3) {
			nr_node->routes[2] = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[0];

			nr_node->routes[0].quality   = quality;
			nr_node->routes[0].obs_count = obs_count;
			nr_node->routes[0].neighbour = nr_neigh;

			nr_node->which++;
			nr_node->count++;
			nr_neigh_hold(nr_neigh);
			nr_neigh->count++;
		} else {
			/* It must be better than the worst */
			if (quality > nr_node->routes[2].quality) {
				nr_node->routes[2].neighbour->count--;
				nr_neigh_put(nr_node->routes[2].neighbour);

				if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
					nr_remove_neigh(nr_node->routes[2].neighbour);

				nr_node->routes[2].quality   = quality;
				nr_node->routes[2].obs_count = obs_count;
				nr_node->routes[2].neighbour = nr_neigh;

				nr_neigh_hold(nr_neigh);
				nr_neigh->count++;
			}
		}
	}

	/* Now re-sort the routes in quality order */
	switch (nr_node->count) {
	case 3:
		re_sort_routes(nr_node, 0, 1);
		re_sort_routes(nr_node, 1, 2);
		fallthrough;
	case 2:
		re_sort_routes(nr_node, 0, 1);
		break;
	case 1:
		break;
	}

	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			if (i < nr_node->which)
				nr_node->which = i;
			break;
		}
	}

	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);
	return 0;
}
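/*
 * Worked example for nr_add_node() above (arbitrary values): a node learnt
 * with qualities 192, 226 and 180 via three different neighbours ends up
 * with routes[] sorted as { 226, 192, 180 }.  A later broadcast advertising
 * quality 200 via a fourth neighbour evicts the 180 entry and the table
 * becomes { 226, 200, 192 }; an advertisement of quality 150 would be
 * ignored, as it is no better than the current worst route.
 */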

static inline void __nr_remove_node(struct nr_node *nr_node)
{
	hlist_del_init(&nr_node->node_node);
	nr_node_put(nr_node);
}

#define nr_remove_node_locked(__node) \
	__nr_remove_node(__node)

static void nr_remove_node(struct nr_node *nr_node)
{
	spin_lock_bh(&nr_node_list_lock);
	__nr_remove_node(nr_node);
	spin_unlock_bh(&nr_node_list_lock);
}

static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	hlist_del_init(&nr_neigh->neigh_node);
	nr_neigh_put(nr_neigh);
}

#define nr_remove_neigh_locked(__neigh) \
	__nr_remove_neigh(__neigh)

static void nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	spin_lock_bh(&nr_neigh_list_lock);
	__nr_remove_neigh(nr_neigh);
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 *	"Delete" a node. Strictly speaking, remove a route to the node. The
 *	node itself is only deleted if no routes are left to it.
 */
static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
{
	struct nr_node  *nr_node;
	struct nr_neigh *nr_neigh;
	int i;

	nr_node = nr_node_get(callsign);

	if (nr_node == NULL)
		return -EINVAL;

	nr_neigh = nr_neigh_get_dev(neighbour, dev);

	if (nr_neigh == NULL) {
		nr_node_put(nr_node);
		return -EINVAL;
	}

	nr_node_lock(nr_node);
	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_neigh->count--;
			nr_neigh_put(nr_neigh);

			if (nr_neigh->count == 0 && !nr_neigh->locked)
				nr_remove_neigh(nr_neigh);
			nr_neigh_put(nr_neigh);

			nr_node->count--;

			if (nr_node->count == 0) {
				nr_remove_node(nr_node);
			} else {
				switch (i) {
				case 0:
					nr_node->routes[0] = nr_node->routes[1];
					fallthrough;
				case 1:
					nr_node->routes[1] = nr_node->routes[2];
					fallthrough;
				case 2:
					break;
				}
				nr_node_put(nr_node);
			}
			nr_node_unlock(nr_node);

			return 0;
		}
	}
	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return -EINVAL;
}

/*
 *	Lock a neighbour with the given quality.
 */
static int __must_check nr_add_neigh(ax25_address *callsign,
	ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);
	if (nr_neigh) {
		nr_neigh->quality = quality;
		nr_neigh->locked  = 1;
		nr_neigh_put(nr_neigh);
		return 0;
	}

	if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	nr_neigh->callsign = *callsign;
	nr_neigh->digipeat = NULL;
	nr_neigh->ax25     = NULL;
	nr_neigh->dev      = dev;
	nr_neigh->quality  = quality;
	nr_neigh->locked   = 1;
	nr_neigh->count    = 0;
	nr_neigh->number   = nr_neigh_no++;
	nr_neigh->failed   = 0;
	refcount_set(&nr_neigh->refcount, 1);

	if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
		nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
					     GFP_KERNEL);
		if (nr_neigh->digipeat == NULL) {
			kfree(nr_neigh);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&nr_neigh_list_lock);
	hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
	/* refcount is initialized at 1 */
	spin_unlock_bh(&nr_neigh_list_lock);

	return 0;
}
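/*
 * From user space a neighbour is normally locked through the SIOCADDRT
 * handler in nr_rt_ioctl() below, roughly like this (sketch only; "ax0"
 * and the file descriptor are assumed, and the callsign must already be
 * in binary ax25_address form):
 *
 *	struct nr_route_struct nr = { .type = NETROM_NEIGH };
 *
 *	strcpy(nr.device, "ax0");
 *	nr.callsign = neigh_call;
 *	nr.quality  = 203;
 *	nr.ndigis   = 0;
 *	ioctl(nr_fd, SIOCADDRT, &nr);
 */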

/*
 *	"Delete" a neighbour. The neighbour is only removed if the number
 *	of nodes that may use it is zero.
 */
static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);

	if (nr_neigh == NULL)
		return -EINVAL;

	nr_neigh->quality = quality;
	nr_neigh->locked  = 0;

	if (nr_neigh->count == 0)
		nr_remove_neigh(nr_neigh);
	nr_neigh_put(nr_neigh);

	return 0;
}

/*
 *	Decrement the obsolescence count by one. If a route is reduced to a
 *	count of zero, remove it. Also remove any unlocked neighbours with
 *	zero nodes routing via them.
 */
static int nr_dec_obs(void)
{
	struct nr_neigh *nr_neigh;
	struct nr_node  *s;
	struct hlist_node *nodet;
	int i;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(s, nodet, &nr_node_list) {
		nr_node_lock(s);
		for (i = 0; i < s->count; i++) {
			switch (s->routes[i].obs_count) {
			case 0:		/* A locked entry */
				break;

			case 1:		/* From 1 -> 0 */
				nr_neigh = s->routes[i].neighbour;

				nr_neigh->count--;
				nr_neigh_put(nr_neigh);

				if (nr_neigh->count == 0 && !nr_neigh->locked)
					nr_remove_neigh(nr_neigh);

				s->count--;

				switch (i) {
				case 0:
					s->routes[0] = s->routes[1];
					fallthrough;
				case 1:
					s->routes[1] = s->routes[2];
					break;
				case 2:
					break;
				}
				break;

			default:
				s->routes[i].obs_count--;
				break;

			}
		}

		if (s->count <= 0)
			nr_remove_node_locked(s);
		nr_node_unlock(s);
	}
	spin_unlock_bh(&nr_node_list_lock);

	return 0;
}
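/*
 * nr_dec_obs() is only reached from user space via the SIOCNRDECOBS ioctl
 * (see nr_rt_ioctl() below); a routing daemon is expected to issue it
 * periodically so that routes which are no longer being broadcast age out
 * once their obsolescence count reaches zero.
 */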

/*
 *	A device has been removed. Remove its routes and neighbours.
 */
void nr_rt_device_down(struct net_device *dev)
{
	struct nr_neigh *s;
	struct hlist_node *nodet, *node2t;
	struct nr_node  *t;
	int i;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		if (s->dev == dev) {
			spin_lock_bh(&nr_node_list_lock);
			nr_node_for_each_safe(t, node2t, &nr_node_list) {
				nr_node_lock(t);
				for (i = 0; i < t->count; i++) {
					if (t->routes[i].neighbour == s) {
						t->count--;

						switch (i) {
						case 0:
							t->routes[0] = t->routes[1];
							fallthrough;
						case 1:
							t->routes[1] = t->routes[2];
							break;
						case 2:
							break;
						}
					}
				}

				if (t->count <= 0)
					nr_remove_node_locked(t);
				nr_node_unlock(t);
			}
			spin_unlock_bh(&nr_node_list_lock);

			nr_remove_neigh_locked(s);
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 *	Check that the device given is a valid AX.25 interface that is "up",
 *	or a valid Ethernet interface with an AX.25 callsign binding.
 */
static struct net_device *nr_ax25_dev_get(char *devname)
{
	struct net_device *dev;

	if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
		return NULL;

	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
		return dev;

	dev_put(dev);
	return NULL;
}

/*
 *	Find the first active NET/ROM device, usually "nr0".
 */
struct net_device *nr_dev_first(void)
{
	struct net_device *dev, *first = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
				first = dev;
	}
	dev_hold(first);
	rcu_read_unlock();

	return first;
}

/*
 *	Find the NET/ROM device for the given callsign.
 */
struct net_device *nr_dev_get(ax25_address *addr)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
		    ax25cmp(addr, (const ax25_address *)dev->dev_addr) == 0) {
			dev_hold(dev);
			goto out;
		}
	}
	dev = NULL;
out:
	rcu_read_unlock();
	return dev;
}

static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
	ax25_address *digipeaters)
{
	int i;

	if (ndigis == 0)
		return NULL;

	for (i = 0; i < ndigis; i++) {
		digi->calls[i]    = digipeaters[i];
		digi->repeated[i] = 0;
	}

	digi->ndigi      = ndigis;
	digi->lastrepeat = -1;

	return digi;
}

/*
 *	Handle the ioctls that control the routing functions.
 */
int nr_rt_ioctl(unsigned int cmd, void __user *arg)
{
	struct nr_route_struct nr_route;
	struct net_device *dev;
	ax25_digi digi;
	int ret;

	switch (cmd) {
	case SIOCADDRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if (nr_route.ndigis > AX25_MAX_DIGIS)
			return -EINVAL;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			if (strnlen(nr_route.mnemonic, 7) == 7) {
				ret = -EINVAL;
				break;
			}

			ret = nr_add_node(&nr_route.callsign,
				nr_route.mnemonic,
				&nr_route.neighbour,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality,
				nr_route.obs_count);
			break;
		case NETROM_NEIGH:
			ret = nr_add_neigh(&nr_route.callsign,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCDELRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_del_node(&nr_route.callsign,
				&nr_route.neighbour, dev);
			break;
		case NETROM_NEIGH:
			ret = nr_del_neigh(&nr_route.callsign,
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCNRDECOBS:
		return nr_dec_obs();

	default:
		return -EINVAL;
	}

	return 0;
}
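/*
 * Usage sketch (user space, illustrative values): adding a route to a node
 * through the SIOCADDRT handler above.  "fd" is assumed to be an open
 * AF_NETROM socket, "ax0" an AX.25 port that is up, and both callsigns
 * already converted to binary ax25_address form.
 *
 *	struct nr_route_struct nr = { .type = NETROM_NODE };
 *
 *	strcpy(nr.device, "ax0");
 *	strcpy(nr.mnemonic, "#NODE");	-- at most 6 characters plus NUL
 *	nr.callsign  = node_call;	-- destination node
 *	nr.neighbour = neigh_call;	-- next hop
 *	nr.quality   = 192;
 *	nr.obs_count = 6;
 *	nr.ndigis    = 0;
 *	if (ioctl(fd, SIOCADDRT, &nr) < 0)
 *		perror("SIOCADDRT");
 */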

/*
 *	A level 2 link has timed out, so it appears to be a poor link;
 *	don't use that neighbour until it is reset.
 */
void nr_link_failed(ax25_cb *ax25, int reason)
{
	struct nr_neigh *s, *nr_neigh = NULL;
	struct nr_node  *nr_node = NULL;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(s, &nr_neigh_list) {
		if (s->ax25 == ax25) {
			nr_neigh_hold(s);
			nr_neigh = s;
			break;
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);

	if (nr_neigh == NULL)
		return;

	nr_neigh->ax25 = NULL;
	ax25_cb_put(ax25);

	if (++nr_neigh->failed < READ_ONCE(sysctl_netrom_link_fails_count)) {
		nr_neigh_put(nr_neigh);
		return;
	}
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list) {
		nr_node_lock(nr_node);
		if (nr_node->which < nr_node->count &&
		    nr_node->routes[nr_node->which].neighbour == nr_neigh)
			nr_node->which++;
		nr_node_unlock(nr_node);
	}
	spin_unlock_bh(&nr_node_list_lock);
	nr_neigh_put(nr_neigh);
}
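/*
 * Note: nr_neigh->failed counts consecutive level 2 failures.  Once it
 * reaches sysctl_netrom_link_fails_count, the loop above bumps "which" on
 * every node whose currently selected route runs via the failed neighbour,
 * so traffic falls back to the next entry in the route table.
 * nr_add_node() clears the counter and winds "which" back as soon as a
 * frame is heard from that neighbour again.
 */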

/*
 *	Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
 *	indicates an internally generated frame.
 */
int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	ax25_address *nr_src, *nr_dest;
	struct nr_neigh *nr_neigh;
	struct nr_node  *nr_node;
	struct net_device *dev;
	unsigned char *dptr;
	ax25_cb *ax25s;
	int ret;
	struct sk_buff *skbn;

	nr_src  = (ax25_address *)(skb->data + 0);
	nr_dest = (ax25_address *)(skb->data + 7);
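	/*
	 * NET/ROM network header layout, as used above and below: bytes 0-6
	 * carry the originator callsign, bytes 7-13 the destination callsign
	 * and byte 14 the time-to-live.
	 */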

	if (ax25 != NULL) {
		ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
				  ax25->ax25_dev->dev, 0,
				  READ_ONCE(sysctl_netrom_obsolescence_count_initialiser));
		if (ret)
			return ret;
	}

	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* It's for me */
		if (ax25 == NULL)			/* It's from me */
			ret = nr_loopback_queue(skb);
		else
			ret = nr_rx_frame(skb, dev);
		dev_put(dev);
		return ret;
	}

	if (!READ_ONCE(sysctl_netrom_routing_control) && ax25 != NULL)
		return 0;

	/* Its Time-To-Live has expired */
	if (skb->data[14] == 1) {
		return 0;
	}

	nr_node = nr_node_get(nr_dest);
	if (nr_node == NULL)
		return 0;
	nr_node_lock(nr_node);

	if (nr_node->which >= nr_node->count) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	nr_neigh = nr_node->routes[nr_node->which].neighbour;

	if ((dev = nr_dev_first()) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	/* We are going to change the NET/ROM headers, so we need our own
	   copy of the skb; we also did not know until now how much header
	   space we had to reserve. - RXQ */
	if ((skbn = skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		dev_put(dev);
		return 0;
	}
	kfree_skb(skb);
	skb = skbn;
	skb->data[14]--;

	dptr  = skb_push(skb, 1);
	*dptr = AX25_P_NETROM;

	ax25s = nr_neigh->ax25;
	nr_neigh->ax25 = ax25_send_frame(skb, 256,
					 (const ax25_address *)dev->dev_addr,
					 &nr_neigh->callsign,
					 nr_neigh->digipeat, nr_neigh->dev);
	if (ax25s)
		ax25_cb_put(ax25s);

	dev_put(dev);
	ret = (nr_neigh->ax25 != NULL);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return ret;
}

#ifdef CONFIG_PROC_FS

static void *nr_node_start(struct seq_file *seq, loff_t *pos)
	__acquires(&nr_node_list_lock)
{
	spin_lock_bh(&nr_node_list_lock);
	return seq_hlist_start_head(&nr_node_list, *pos);
}

static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_node_list, pos);
}

static void nr_node_stop(struct seq_file *seq, void *v)
	__releases(&nr_node_list_lock)
{
	spin_unlock_bh(&nr_node_list_lock);
}

static int nr_node_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "callsign  mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
	else {
		struct nr_node *nr_node = hlist_entry(v, struct nr_node,
						      node_node);

		nr_node_lock(nr_node);
		seq_printf(seq, "%-9s %-7s  %d %d",
			ax2asc(buf, &nr_node->callsign),
			(nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
			nr_node->which + 1,
			nr_node->count);

		for (i = 0; i < nr_node->count; i++) {
			seq_printf(seq, "  %3d   %d %05d",
				nr_node->routes[i].quality,
				nr_node->routes[i].obs_count,
				nr_node->routes[i].neighbour->number);
		}
		nr_node_unlock(nr_node);

		seq_puts(seq, "\n");
	}
	return 0;
}

const struct seq_operations nr_node_seqops = {
	.start = nr_node_start,
	.next = nr_node_next,
	.stop = nr_node_stop,
	.show = nr_node_show,
};
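/*
 * The lines written above to /proc/net/nr_nodes look roughly like this
 * (callsign and values are illustrative):
 *
 *	callsign  mnemonic w n qual obs neigh qual obs neigh qual obs neigh
 *	NB7J-4    #MEDFD   1 1  192   6 00001
 */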

static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
	__acquires(&nr_neigh_list_lock)
{
	spin_lock_bh(&nr_neigh_list_lock);
	return seq_hlist_start_head(&nr_neigh_list, *pos);
}

static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_neigh_list, pos);
}

static void nr_neigh_stop(struct seq_file *seq, void *v)
	__releases(&nr_neigh_list_lock)
{
	spin_unlock_bh(&nr_neigh_list_lock);
}

static int nr_neigh_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "addr  callsign  dev  qual lock count failed digipeaters\n");
	else {
		struct nr_neigh *nr_neigh;

		nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
		seq_printf(seq, "%05d %-9s %-4s  %3d    %d   %3d    %3d",
			nr_neigh->number,
			ax2asc(buf, &nr_neigh->callsign),
			nr_neigh->dev ? nr_neigh->dev->name : "???",
			nr_neigh->quality,
			nr_neigh->locked,
			nr_neigh->count,
			nr_neigh->failed);

		if (nr_neigh->digipeat != NULL) {
			for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
				seq_printf(seq, " %s",
					   ax2asc(buf, &nr_neigh->digipeat->calls[i]));
		}

		seq_puts(seq, "\n");
	}
	return 0;
}

const struct seq_operations nr_neigh_seqops = {
	.start = nr_neigh_start,
	.next = nr_neigh_next,
	.stop = nr_neigh_stop,
	.show = nr_neigh_show,
};
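/*
 * Corresponding /proc/net/nr_neigh output, again with illustrative values:
 *
 *	addr  callsign  dev  qual lock count failed digipeaters
 *	00001 NB7J-7    ax0   192    0     1      0
 */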
#endif

/*
 *	Free all memory associated with the nodes and routes lists.
 */
void nr_rt_free(void)
{
	struct nr_neigh *s = NULL;
	struct nr_node  *t = NULL;
	struct hlist_node *nodet;

	spin_lock_bh(&nr_neigh_list_lock);
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(t, nodet, &nr_node_list) {
		nr_node_lock(t);
		nr_remove_node_locked(t);
		nr_node_unlock(t);
	}
	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		while (s->count) {
			s->count--;
			nr_neigh_put(s);
		}
		nr_remove_neigh_locked(s);
	}
	spin_unlock_bh(&nr_node_list_lock);
	spin_unlock_bh(&nr_neigh_list_lock);
}