// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * The HSR spec says never to forward the same frame twice on the same
 * interface. A frame is identified by its source MAC address and its HSR
 * sequence number. This code keeps track of senders and their sequence numbers
 * to allow filtering of duplicate frames, and to detect HSR ring errors.
 * The same code handles filtering of duplicates for PRP as well.
 */

#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include "hsr_main.h"
#include "hsr_framereg.h"
#include "hsr_netlink.h"

/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
 * false otherwise.
 */
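/* Worked examples of the modulo-2^16 comparison below (for illustration):
 *
 *   seq_nr_after(0x0001, 0xffff):
 *	(s16)(0xffff - 0x0001) = (s16)0xfffe = -2 < 0, so 1 counts as
 *	"after" 65535 across the wrap-around.
 *
 *   seq_nr_after(0, 32768) vs. seq_nr_after(32768, 0):
 *	without the explicit 32768 check both would return true, since
 *	(s16)0x8000 = -32768 < 0 in either direction; the check makes
 *	seq_nr_after(0, 32768) return false so only one of them holds.
 */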
static bool seq_nr_after(u16 a, u16 b)
{
	/* Remove inconsistency where
	 * seq_nr_after(a, b) == seq_nr_before(a, b)
	 */
	if ((int)b - a == 32768)
		return false;

	return (((s16)(b - a)) < 0);
}

#define seq_nr_before(a, b)		seq_nr_after((b), (a))
#define seq_nr_before_or_eq(a, b)	(!seq_nr_after((a), (b)))

bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
{
	struct hsr_self_node *sn;
	bool ret = false;

	rcu_read_lock();
	sn = rcu_dereference(hsr->self_node);
	if (!sn) {
		WARN_ONCE(1, "HSR: No self node\n");
		goto out;
	}

	if (ether_addr_equal(addr, sn->macaddress_A) ||
	    ether_addr_equal(addr, sn->macaddress_B))
		ret = true;
out:
	rcu_read_unlock();
	return ret;
}

/* Search for a MAC entry. Caller must hold the RCU read lock.
 */
static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
					    const unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;

	list_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, addr))
			return node;
	}

	return NULL;
}

/* Helper for device init; the self_node is used in hsr_rcv() to recognize
 * frames from self that have been looped over the HSR ring.
 */
int hsr_create_self_node(struct hsr_priv *hsr,
			 const unsigned char addr_a[ETH_ALEN],
			 const unsigned char addr_b[ETH_ALEN])
{
	struct hsr_self_node *sn, *old;

	sn = kmalloc(sizeof(*sn), GFP_KERNEL);
	if (!sn)
		return -ENOMEM;

	ether_addr_copy(sn->macaddress_A, addr_a);
	ether_addr_copy(sn->macaddress_B, addr_b);

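	/* Publish the new self_node under list_lock; concurrent readers in
	 * hsr_addr_is_self() see either the old or the new entry, and the
	 * old one is freed only after an RCU grace period (kfree_rcu).
	 */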
	spin_lock_bh(&hsr->list_lock);
	old = rcu_replace_pointer(hsr->self_node, sn,
				  lockdep_is_held(&hsr->list_lock));
	spin_unlock_bh(&hsr->list_lock);

	if (old)
		kfree_rcu(old, rcu_head);
	return 0;
}

void hsr_del_self_node(struct hsr_priv *hsr)
{
	struct hsr_self_node *old;

	spin_lock_bh(&hsr->list_lock);
	old = rcu_replace_pointer(hsr->self_node, NULL,
				  lockdep_is_held(&hsr->list_lock));
	spin_unlock_bh(&hsr->list_lock);
	if (old)
		kfree_rcu(old, rcu_head);
}

void hsr_del_nodes(struct list_head *node_db)
{
	struct hsr_node *node;
	struct hsr_node *tmp;

	list_for_each_entry_safe(node, tmp, node_db, mac_list)
		kfree(node);
}

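/* PRP only: called (via proto_ops->handle_san_frame) when a frame arrived
 * without a valid PRP RCT, i.e. presumably from a SAN (singly attached
 * node). Remember which LAN the SAN was seen on.
 */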
void prp_handle_san_frame(bool san, enum hsr_port_type port,
			  struct hsr_node *node)
{
	/* Mark if the SAN node is over LAN_A or LAN_B */
	if (port == HSR_PT_SLAVE_A) {
		node->san_a = true;
		return;
	}

	if (port == HSR_PT_SLAVE_B)
		node->san_b = true;
}

/* Allocate an hsr_node and add it to node_db. 'addr' is the node's
 * macaddress_A; seq_out is used to initialize filtering of outgoing duplicate
 * frames originating from the newly added node.
 */
static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
				     struct list_head *node_db,
				     unsigned char addr[],
				     u16 seq_out, bool san,
				     enum hsr_port_type rx_port)
{
	struct hsr_node *new_node, *node;
	unsigned long now;
	int i;

	new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
	if (!new_node)
		return NULL;

	ether_addr_copy(new_node->macaddress_A, addr);
	spin_lock_init(&new_node->seq_out_lock);

	/* We are only interested in time diffs here, so use current jiffies
	 * as initialization. (0 could trigger a spurious ring error warning.)
	 */
	now = jiffies;
	for (i = 0; i < HSR_PT_PORTS; i++) {
		new_node->time_in[i] = now;
		new_node->time_out[i] = now;
	}
	for (i = 0; i < HSR_PT_PORTS; i++)
		new_node->seq_out[i] = seq_out;

	if (san && hsr->proto_ops->handle_san_frame)
		hsr->proto_ops->handle_san_frame(san, rx_port, new_node);

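	/* Check under list_lock whether another context added an entry for
	 * this address while we were allocating; if so, drop the new node
	 * and return the existing one.
	 */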
	spin_lock_bh(&hsr->list_lock);
	list_for_each_entry_rcu(node, node_db, mac_list,
				lockdep_is_held(&hsr->list_lock)) {
		if (ether_addr_equal(node->macaddress_A, addr))
			goto out;
		if (ether_addr_equal(node->macaddress_B, addr))
			goto out;
	}
	list_add_tail_rcu(&new_node->mac_list, node_db);
	spin_unlock_bh(&hsr->list_lock);
	return new_node;
out:
	spin_unlock_bh(&hsr->list_lock);
	kfree(new_node);
	return node;
}

void prp_update_san_info(struct hsr_node *node, bool is_sup)
{
	if (!is_sup)
		return;

	node->san_a = false;
	node->san_b = false;
}

/* Get the hsr_node from which 'skb' was sent.
 */
struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
			      struct sk_buff *skb, bool is_sup,
			      enum hsr_port_type rx_port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_node *node;
	struct ethhdr *ethhdr;
	struct prp_rct *rct;
	bool san = false;
	u16 seq_out;

	if (!skb_mac_header_was_set(skb))
		return NULL;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	list_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
			if (hsr->proto_ops->update_san_info)
				hsr->proto_ops->update_san_info(node, is_sup);
			return node;
		}
		if (ether_addr_equal(node->macaddress_B, ethhdr->h_source)) {
			if (hsr->proto_ops->update_san_info)
				hsr->proto_ops->update_san_info(node, is_sup);
			return node;
		}
	}

	/* No entry found: create one. Any sender, i.e. any node connected to
	 * an HSR/PRP device, may get a node entry here.
	 */
	if (ethhdr->h_proto == htons(ETH_P_PRP) ||
	    ethhdr->h_proto == htons(ETH_P_HSR)) {
		/* Check if skb contains hsr_ethhdr */
		if (skb->mac_len < sizeof(struct hsr_ethhdr))
			return NULL;

		/* Use the existing sequence_nr from the tag as starting point
		 * for filtering duplicate frames. Subtract one so that this
		 * very frame is not mistaken for a duplicate.
		 */
		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
	} else {
		rct = skb_get_PRP_rct(skb);
		if (rct && prp_check_lsdu_size(skb, rct, is_sup)) {
			seq_out = prp_get_skb_sequence_nr(rct);
		} else {
			if (rx_port != HSR_PT_MASTER)
				san = true;
			seq_out = HSR_SEQNR_START;
		}
	}

	return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out,
			    san, rx_port);
}

/* Use the supervision frame's info about a possible macaddress_B to merge
 * nodes that have previously had their macaddress_B registered as a separate
 * node.
 */
void hsr_handle_sup_frame(struct hsr_frame_info *frame)
{
	struct hsr_node *node_curr = frame->node_src;
	struct hsr_port *port_rcv = frame->port_rcv;
	struct hsr_priv *hsr = port_rcv->hsr;
	struct hsr_sup_payload *hsr_sp;
	struct hsr_sup_tlv *hsr_sup_tlv;
	struct hsr_node *node_real;
	struct sk_buff *skb = NULL;
	struct list_head *node_db;
	struct ethhdr *ethhdr;
	int i;
	unsigned int pull_size = 0;
	unsigned int total_pull_size = 0;

	/* Here either frame->skb_hsr or frame->skb_prp should be valid, as a
	 * supervision frame will always carry protocol header info.
	 */
	if (frame->skb_hsr)
		skb = frame->skb_hsr;
	else if (frame->skb_prp)
		skb = frame->skb_prp;
	else if (frame->skb_std)
		skb = frame->skb_std;
	if (!skb)
		return;
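
	/* The supervision payload is parsed by pulling headers off the skb
	 * one at a time; total_pull_size tracks how much was pulled so the
	 * skb can be restored with skb_push() at 'done' below.
	 */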

	/* Skip past the Ethernet header. */
	pull_size = sizeof(struct ethhdr);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* And past the HSR tag, if present. */
	if (ethhdr->h_proto == htons(ETH_P_HSR)) {
		pull_size = sizeof(struct hsr_tag);
		skb_pull(skb, pull_size);
		total_pull_size += pull_size;
	}

	/* And past the HSR supervision tag. */
	pull_size = sizeof(struct hsr_sup_tag);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	/* Get the HSR supervision payload */
	hsr_sp = (struct hsr_sup_payload *)skb->data;

	/* Merge node_curr (registered on macaddress_B) into node_real */
	node_db = &port_rcv->hsr->node_db;
	node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
	if (!node_real)
		/* No frame received from AddrA of this node yet */
		node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
					 HSR_SEQNR_START - 1, true,
					 port_rcv->type);
	if (!node_real)
		goto done; /* No mem */
	if (node_real == node_curr)
		/* Node has already been merged */
		goto done;

	/* Skip past the first HSR supervision payload. */
	pull_size = sizeof(struct hsr_sup_payload);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	/* Get the second supervision TLV */
	hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
	/* And check if it is a RedBox MAC TLV */
	if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
		/* We could stop here after pushing hsr_sup_payload,
		 * or proceed and allow macaddress_B for RedBoxes as well.
		 */
		/* Sanity check the length */
		if (hsr_sup_tlv->HSR_TLV_length != 6)
			goto done;

		/* Skip past the second HSR supervision TLV. */
		pull_size = sizeof(struct hsr_sup_tlv);
		skb_pull(skb, pull_size);
		total_pull_size += pull_size;

		/* Get the RedBox MAC address. */
		hsr_sp = (struct hsr_sup_payload *)skb->data;

		/* Check if the RedBox MAC and the node MAC are equal. */
		if (!ether_addr_equal(node_real->macaddress_A, hsr_sp->macaddress_A)) {
			/* This is a RedBox supervision frame for a VDAN! */
			goto done;
		}
	}

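	/* Merge: record the supervision frame's source as macaddress_B of the
	 * real node, take over the freshest time_in stamps and the highest
	 * seq_out numbers, and finally drop the now-duplicate node_curr entry.
	 */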
	ether_addr_copy(node_real->macaddress_B, ethhdr->h_source);
	spin_lock_bh(&node_real->seq_out_lock);
	for (i = 0; i < HSR_PT_PORTS; i++) {
		if (!node_curr->time_in_stale[i] &&
		    time_after(node_curr->time_in[i], node_real->time_in[i])) {
			node_real->time_in[i] = node_curr->time_in[i];
			node_real->time_in_stale[i] =
						node_curr->time_in_stale[i];
		}
		if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
			node_real->seq_out[i] = node_curr->seq_out[i];
	}
	spin_unlock_bh(&node_real->seq_out_lock);
	node_real->addr_B_port = port_rcv->type;

	spin_lock_bh(&hsr->list_lock);
	if (!node_curr->removed) {
		list_del_rcu(&node_curr->mac_list);
		node_curr->removed = true;
		kfree_rcu(node_curr, rcu_head);
	}
	spin_unlock_bh(&hsr->list_lock);

done:
	/* Restore the skb: push back everything we pulled above. */
	skb_push(skb, total_pull_size);
}

/* 'skb' is a frame meant for this host, that is to be passed to upper layers.
 *
 * If the frame was sent by a node's B interface, replace the source
 * address with that node's "official" address (macaddress_A) so that upper
 * layers recognize where it came from.
 */
void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
{
	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
		return;
	}

	memcpy(&eth_hdr(skb)->h_source, node->macaddress_A, ETH_ALEN);
}

/* 'skb' is a frame meant for another host.
 * 'port' is the outgoing interface
 *
 * Substitute the target (dest) MAC address if necessary, so that it matches
 * the recipient interface MAC address, regardless of whether that is the
 * recipient's A or B interface.
 * This is needed to keep the packets flowing through switches that learn on
 * which "side" the different interfaces are.
 */
void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
			 struct hsr_port *port)
{
	struct hsr_node *node_dst;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
		return;
	}

	if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
		return;

	node_dst = find_node_by_addr_A(&port->hsr->node_db,
				       eth_hdr(skb)->h_dest);
	if (!node_dst) {
		if (port->hsr->prot_version != PRP_V1 && net_ratelimit())
			netdev_err(skb->dev, "%s: Unknown node\n", __func__);
		return;
	}
	if (port->type != node_dst->addr_B_port)
		return;

	if (is_valid_ether_addr(node_dst->macaddress_B))
		ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
}

void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
			   u16 sequence_nr)
{
	/* Don't register incoming frames without a valid sequence number. This
	 * ensures that entries of restarted nodes get pruned so that they can
	 * re-register and resume communications.
	 */
	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
	    seq_nr_before(sequence_nr, node->seq_out[port->type]))
		return;

	node->time_in[port->type] = jiffies;
	node->time_in_stale[port->type] = false;
}

/* 'skb' is an HSR Ethernet frame (with an HSR tag inserted), with a valid
 * ethhdr->h_source address and skb->mac_header set.
 *
 * Return:
 *	 1 if frame can be shown to have been sent recently on this interface,
 *	 0 otherwise, or
 *	 negative error code on error
 */
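/* A frame counts as already sent if its sequence number is at or before the
 * last one registered for this port and that registration is younger than
 * HSR_ENTRY_FORGET_TIME; otherwise the per-port counters are updated and the
 * frame is considered new.
 */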
int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
			   u16 sequence_nr)
{
	spin_lock_bh(&node->seq_out_lock);
	if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) &&
	    time_is_after_jiffies(node->time_out[port->type] +
	    msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) {
		spin_unlock_bh(&node->seq_out_lock);
		return 1;
	}

	node->time_out[port->type] = jiffies;
	node->seq_out[port->type] = sequence_nr;
	spin_unlock_bh(&node->seq_out_lock);
	return 0;
}

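/* Return the slave port on which frames from this node appear to be arriving
 * late (or not at all), i.e. the side suspected of a ring fault: a port whose
 * time_in has gone stale, or one whose newest frame is more than
 * MAX_SLAVE_DIFF older than the other side's. Returns NULL if both sides
 * look healthy.
 */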
static struct hsr_port *get_late_port(struct hsr_priv *hsr,
				      struct hsr_node *node)
{
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	if (time_after(node->time_in[HSR_PT_SLAVE_B],
		       node->time_in[HSR_PT_SLAVE_A] +
					msecs_to_jiffies(MAX_SLAVE_DIFF)))
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (time_after(node->time_in[HSR_PT_SLAVE_A],
		       node->time_in[HSR_PT_SLAVE_B] +
					msecs_to_jiffies(MAX_SLAVE_DIFF)))
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	return NULL;
}

/* Remove stale sequence_nr records. Run periodically from the prune timer,
 * which is re-armed with PRUNE_PERIOD below.
 */
void hsr_prune_nodes(struct timer_list *t)
{
	struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
	struct hsr_node *node;
	struct hsr_node *tmp;
	struct hsr_port *port;
	unsigned long timestamp;
	unsigned long time_a, time_b;

	spin_lock_bh(&hsr->list_lock);
	list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
		/* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
		 * nor time_in[HSR_PT_SLAVE_B] will ever be updated for
		 * the master port. Thus the master node would be repeatedly
		 * pruned, leading to packet loss.
		 */
		if (hsr_addr_is_self(hsr, node->macaddress_A))
			continue;

		/* Shorthand */
		time_a = node->time_in[HSR_PT_SLAVE_A];
		time_b = node->time_in[HSR_PT_SLAVE_B];

		/* Check for timestamps old enough to risk wrap-around */
		if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
			node->time_in_stale[HSR_PT_SLAVE_A] = true;
		if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
			node->time_in_stale[HSR_PT_SLAVE_B] = true;

		/* Get age of newest frame from node.
		 * At least one time_in is OK here; nodes get pruned long
		 * before both time_ins can get stale
		 */
		timestamp = time_a;
		if (node->time_in_stale[HSR_PT_SLAVE_A] ||
		    (!node->time_in_stale[HSR_PT_SLAVE_B] &&
		    time_after(time_b, time_a)))
			timestamp = time_b;

		/* Warn of ring error only as long as we get frames at all */
		if (time_is_after_jiffies(timestamp +
				msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
			rcu_read_lock();
			port = get_late_port(hsr, node);
			if (port)
				hsr_nl_ringerror(hsr, node->macaddress_A, port);
			rcu_read_unlock();
		}

		/* Prune old entries */
		if (time_is_before_jiffies(timestamp +
				msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
			hsr_nl_nodedown(hsr, node->macaddress_A);
			if (!node->removed) {
				list_del_rcu(&node->mac_list);
				node->removed = true;
				/* Note that we need to free this entry later: */
				kfree_rcu(node, rcu_head);
			}
		}
	}
	spin_unlock_bh(&hsr->list_lock);

	/* Restart timer */
	mod_timer(&hsr->prune_timer,
		  jiffies + msecs_to_jiffies(PRUNE_PERIOD));
}

void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
			unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;

	if (!_pos) {
		node = list_first_or_null_rcu(&hsr->node_db,
					      struct hsr_node, mac_list);
		if (node)
			ether_addr_copy(addr, node->macaddress_A);
		return node;
	}

	node = _pos;
	list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
		ether_addr_copy(addr, node->macaddress_A);
		return node;
	}

	return NULL;
}

int hsr_get_node_data(struct hsr_priv *hsr,
		      const unsigned char *addr,
		      unsigned char addr_b[ETH_ALEN],
		      unsigned int *addr_b_ifindex,
		      int *if1_age,
		      u16 *if1_seq,
		      int *if2_age,
		      u16 *if2_seq)
{
	struct hsr_node *node;
	struct hsr_port *port;
	unsigned long tdiff;

	node = find_node_by_addr_A(&hsr->node_db, addr);
	if (!node)
		return -ENOENT;

	ether_addr_copy(addr_b, node->macaddress_B);

	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		*if1_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if1_age = INT_MAX;
#endif
	else
		*if1_age = jiffies_to_msecs(tdiff);

	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		*if2_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if2_age = INT_MAX;
#endif
	else
		*if2_age = jiffies_to_msecs(tdiff);

	/* Present sequence numbers as if they were incoming on the interface */
	*if1_seq = node->seq_out[HSR_PT_SLAVE_B];
	*if2_seq = node->seq_out[HSR_PT_SLAVE_A];

	if (node->addr_B_port != HSR_PT_NONE) {
		port = hsr_port_get_hsr(hsr, node->addr_B_port);
		*addr_b_ifindex = port->dev->ifindex;
	} else {
		*addr_b_ifindex = -1;
	}

	return 0;
}
