// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/cfm_bridge.h>
#include <uapi/linux/cfm_bridge.h>
#include "br_private_cfm.h"

static struct br_cfm_mep *br_mep_find(struct net_bridge *br, u32 instance)
{
	struct br_cfm_mep *mep;

	hlist_for_each_entry(mep, &br->mep_list, head)
		if (mep->instance == instance)
			return mep;

	return NULL;
}

static struct br_cfm_mep *br_mep_find_ifindex(struct net_bridge *br,
					      u32 ifindex)
{
	struct br_cfm_mep *mep;

	hlist_for_each_entry_rcu(mep, &br->mep_list, head,
				 lockdep_rtnl_is_held())
		if (mep->create.ifindex == ifindex)
			return mep;

	return NULL;
}

static struct br_cfm_peer_mep *br_peer_mep_find(struct br_cfm_mep *mep,
						u32 mepid)
{
	struct br_cfm_peer_mep *peer_mep;

	hlist_for_each_entry_rcu(peer_mep, &mep->peer_mep_list, head,
				 lockdep_rtnl_is_held())
		if (peer_mep->mepid == mepid)
			return peer_mep;

	return NULL;
}

static struct net_bridge_port *br_mep_get_port(struct net_bridge *br,
					       u32 ifindex)
{
	struct net_bridge_port *port;

	list_for_each_entry(port, &br->port_list, list)
		if (port->dev->ifindex == ifindex)
			return port;

	return NULL;
}

/* Calculate the CCM interval in us. */
static u32 interval_to_us(enum br_cfm_ccm_interval interval)
{
	switch (interval) {
	case BR_CFM_CCM_INTERVAL_NONE:
		return 0;
	case BR_CFM_CCM_INTERVAL_3_3_MS:
		return 3300;
	case BR_CFM_CCM_INTERVAL_10_MS:
		return 10 * 1000;
	case BR_CFM_CCM_INTERVAL_100_MS:
		return 100 * 1000;
	case BR_CFM_CCM_INTERVAL_1_SEC:
		return 1000 * 1000;
	case BR_CFM_CCM_INTERVAL_10_SEC:
		return 10 * 1000 * 1000;
	case BR_CFM_CCM_INTERVAL_1_MIN:
		return 60 * 1000 * 1000;
	case BR_CFM_CCM_INTERVAL_10_MIN:
		return 10 * 60 * 1000 * 1000;
	}
	return 0;
}

/* Convert the interface interval to CCM PDU value. */
static u32 interval_to_pdu(enum br_cfm_ccm_interval interval)
{
	switch (interval) {
	case BR_CFM_CCM_INTERVAL_NONE:
		return 0;
	case BR_CFM_CCM_INTERVAL_3_3_MS:
		return 1;
	case BR_CFM_CCM_INTERVAL_10_MS:
		return 2;
	case BR_CFM_CCM_INTERVAL_100_MS:
		return 3;
	case BR_CFM_CCM_INTERVAL_1_SEC:
		return 4;
	case BR_CFM_CCM_INTERVAL_10_SEC:
		return 5;
	case BR_CFM_CCM_INTERVAL_1_MIN:
		return 6;
	case BR_CFM_CCM_INTERVAL_10_MIN:
		return 7;
	}
	return 0;
}

/* Convert the CCM PDU value to interval on interface. */
static u32 pdu_to_interval(u32 value)
{
	switch (value) {
	case 0:
		return BR_CFM_CCM_INTERVAL_NONE;
	case 1:
		return BR_CFM_CCM_INTERVAL_3_3_MS;
	case 2:
		return BR_CFM_CCM_INTERVAL_10_MS;
	case 3:
		return BR_CFM_CCM_INTERVAL_100_MS;
	case 4:
		return BR_CFM_CCM_INTERVAL_1_SEC;
	case 5:
		return BR_CFM_CCM_INTERVAL_10_SEC;
	case 6:
		return BR_CFM_CCM_INTERVAL_1_MIN;
	case 7:
		return BR_CFM_CCM_INTERVAL_10_MIN;
	}
	return BR_CFM_CCM_INTERVAL_NONE;
}
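
/* The PDU values above are the 3-bit CCM interval encoding carried in the
 * low bits of the CCM flags octet (see ccm_frame_build() and
 * br_cfm_frame_rx()): 0 means no CCM transmission and 1-7 select 3.3 ms,
 * 10 ms, 100 ms, 1 s, 10 s, 1 min and 10 min respectively.
 * interval_to_pdu() and pdu_to_interval() are inverses for these values;
 * anything else falls back to 0 / BR_CFM_CCM_INTERVAL_NONE.
 */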

static void ccm_rx_timer_start(struct br_cfm_peer_mep *peer_mep)
{
	u32 interval_us;

	interval_us = interval_to_us(peer_mep->mep->cc_config.exp_interval);
	/* The ccm_rx_dwork function must be scheduled with 1/4
	 * of the configured CC 'expected_interval'
	 * in order to detect a CCM defect after 3.25 intervals.
	 */
	queue_delayed_work(system_wq, &peer_mep->ccm_rx_dwork,
			   usecs_to_jiffies(interval_us / 4));
}
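
/* Worked example: with an expected interval of 1 s the RX work is scheduled
 * every 250 ms. ccm_rx_work_expired() allows 13 such periods without a
 * received CCM (13 * 250 ms = 3.25 s) before declaring a CCM defect, i.e.
 * defect detection after 3.25 expected intervals.
 */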

static void br_cfm_notify(int event, const struct net_bridge_port *port)
{
	u32 filter = RTEXT_FILTER_CFM_STATUS;

	br_info_notify(event, port->br, NULL, filter);
}

static void cc_peer_enable(struct br_cfm_peer_mep *peer_mep)
{
	memset(&peer_mep->cc_status, 0, sizeof(peer_mep->cc_status));
	peer_mep->ccm_rx_count_miss = 0;

	ccm_rx_timer_start(peer_mep);
}

static void cc_peer_disable(struct br_cfm_peer_mep *peer_mep)
{
	cancel_delayed_work_sync(&peer_mep->ccm_rx_dwork);
}

static struct sk_buff *ccm_frame_build(struct br_cfm_mep *mep,
				       const struct br_cfm_cc_ccm_tx_info *const tx_info)
{
	struct br_cfm_common_hdr *common_hdr;
	struct net_bridge_port *b_port;
	struct br_cfm_maid *maid;
	u8 *itu_reserved, *e_tlv;
	struct ethhdr *eth_hdr;
	struct sk_buff *skb;
	__be32 *status_tlv;
	__be32 *snumber;
	__be16 *mepid;

	skb = dev_alloc_skb(CFM_CCM_MAX_FRAME_LENGTH);
	if (!skb)
		return NULL;

	rcu_read_lock();
	b_port = rcu_dereference(mep->b_port);
	if (!b_port) {
		kfree_skb(skb);
		rcu_read_unlock();
		return NULL;
	}
	skb->dev = b_port->dev;
	rcu_read_unlock();
	/* The device cannot be deleted until the work queue functions have
	 * completed. This function is called from ccm_tx_work_expired(),
	 * which is a work queue function.
	 */

	skb->protocol = htons(ETH_P_CFM);
	skb->priority = CFM_FRAME_PRIO;

	/* Ethernet header */
	eth_hdr = skb_put(skb, sizeof(*eth_hdr));
	ether_addr_copy(eth_hdr->h_dest, tx_info->dmac.addr);
	ether_addr_copy(eth_hdr->h_source, mep->config.unicast_mac.addr);
	eth_hdr->h_proto = htons(ETH_P_CFM);

	/* Common CFM Header */
	common_hdr = skb_put(skb, sizeof(*common_hdr));
	common_hdr->mdlevel_version = mep->config.mdlevel << 5;
	common_hdr->opcode = BR_CFM_OPCODE_CCM;
	common_hdr->flags = (mep->rdi << 7) |
			    interval_to_pdu(mep->cc_config.exp_interval);
	common_hdr->tlv_offset = CFM_CCM_TLV_OFFSET;

	/* Sequence number */
	snumber = skb_put(skb, sizeof(*snumber));
	if (tx_info->seq_no_update) {
		*snumber = cpu_to_be32(mep->ccm_tx_snumber);
		mep->ccm_tx_snumber += 1;
	} else {
		*snumber = 0;
	}

	mepid = skb_put(skb, sizeof(*mepid));
	*mepid = cpu_to_be16((u16)mep->config.mepid);

	maid = skb_put(skb, sizeof(*maid));
	memcpy(maid->data, mep->cc_config.exp_maid.data, sizeof(maid->data));

	/* ITU reserved (CFM_CCM_ITU_RESERVED_SIZE octets) */
	itu_reserved = skb_put(skb, CFM_CCM_ITU_RESERVED_SIZE);
	memset(itu_reserved, 0, CFM_CCM_ITU_RESERVED_SIZE);
	/* General CFM TLV format:
	 * TLV type:		one byte
	 * TLV value length:	two bytes
	 * TLV value:		'TLV value length' bytes
	 */

	/* Port status TLV. The value length is 1. Total of 4 bytes. */
	if (tx_info->port_tlv) {
		status_tlv = skb_put(skb, sizeof(*status_tlv));
		*status_tlv = cpu_to_be32((CFM_PORT_STATUS_TLV_TYPE << 24) |
					  (1 << 8) |	/* Value length */
					  (tx_info->port_tlv_value & 0xFF));
	}

	/* Interface status TLV. The value length is 1. Total of 4 bytes. */
	if (tx_info->if_tlv) {
		status_tlv = skb_put(skb, sizeof(*status_tlv));
		*status_tlv = cpu_to_be32((CFM_IF_STATUS_TLV_TYPE << 24) |
					  (1 << 8) |	/* Value length */
					  (tx_info->if_tlv_value & 0xFF));
	}

	/* End TLV */
	e_tlv = skb_put(skb, sizeof(*e_tlv));
	*e_tlv = CFM_ENDE_TLV_TYPE;

	return skb;
}
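
/* Layout of the CCM PDU produced by ccm_frame_build():
 *
 *   Ethernet header       dmac from tx_info, smac = MEP unicast MAC,
 *                         EtherType ETH_P_CFM
 *   Common CFM header     MD level/version, opcode CCM, flags (RDI in bit 7,
 *                         CCM interval in the low 3 bits), first TLV offset
 *   Sequence number       4 bytes, zero unless seq_no_update is set
 *   MEP ID                2 bytes
 *   MAID                  the expected MAID from the CC configuration
 *   ITU reserved          CFM_CCM_ITU_RESERVED_SIZE zero bytes
 *   Port status TLV       optional, 4 bytes
 *   Interface status TLV  optional, 4 bytes
 *   End TLV               1 byte
 */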

static void ccm_frame_tx(struct sk_buff *skb)
{
	skb_reset_network_header(skb);
	dev_queue_xmit(skb);
}

/* This function is called with the configured CC 'expected_interval'
 * in order to drive CCM transmission when enabled.
 */
static void ccm_tx_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work;
	struct br_cfm_mep *mep;
	struct sk_buff *skb;
	u32 interval_us;

	del_work = to_delayed_work(work);
	mep = container_of(del_work, struct br_cfm_mep, ccm_tx_dwork);

	if (time_before_eq(mep->ccm_tx_end, jiffies)) {
		/* Transmission period has ended */
		mep->cc_ccm_tx_info.period = 0;
		return;
	}

	skb = ccm_frame_build(mep, &mep->cc_ccm_tx_info);
	if (skb)
		ccm_frame_tx(skb);

	interval_us = interval_to_us(mep->cc_config.exp_interval);
	queue_delayed_work(system_wq, &mep->ccm_tx_dwork,
			   usecs_to_jiffies(interval_us));
}

/* This function is called with 1/4 of the configured CC 'expected_interval'
 * in order to detect a CCM defect after 3.25 intervals.
 */
static void ccm_rx_work_expired(struct work_struct *work)
{
	struct br_cfm_peer_mep *peer_mep;
	struct net_bridge_port *b_port;
	struct delayed_work *del_work;

	del_work = to_delayed_work(work);
	peer_mep = container_of(del_work, struct br_cfm_peer_mep, ccm_rx_dwork);

	/* After 13 counts (4 * 3.25) the 3.25 intervals have expired */
	if (peer_mep->ccm_rx_count_miss < 13) {
		/* 3.25 intervals have NOT expired without CCM reception */
		peer_mep->ccm_rx_count_miss++;

		/* Start timer again */
		ccm_rx_timer_start(peer_mep);
	} else {
		/* 3.25 intervals have expired without CCM reception.
		 * CCM defect detected
		 */
		peer_mep->cc_status.ccm_defect = true;

		/* Change in CCM defect status - notify */
		rcu_read_lock();
		b_port = rcu_dereference(peer_mep->mep->b_port);
		if (b_port)
			br_cfm_notify(RTM_NEWLINK, b_port);
		rcu_read_unlock();
	}
}

static u32 ccm_tlv_extract(struct sk_buff *skb, u32 index,
			   struct br_cfm_peer_mep *peer_mep)
{
	__be32 *s_tlv;
	__be32 _s_tlv;
	u32 h_s_tlv;
	u8 *e_tlv;
	u8 _e_tlv;

	e_tlv = skb_header_pointer(skb, index, sizeof(_e_tlv), &_e_tlv);
	if (!e_tlv)
		return 0;

	/* TLV is present - get the status TLV */
	s_tlv = skb_header_pointer(skb,
				   index,
				   sizeof(_s_tlv), &_s_tlv);
	if (!s_tlv)
		return 0;

	h_s_tlv = ntohl(*s_tlv);
	if ((h_s_tlv >> 24) == CFM_IF_STATUS_TLV_TYPE) {
		/* Interface status TLV */
		peer_mep->cc_status.tlv_seen = true;
		peer_mep->cc_status.if_tlv_value = (h_s_tlv & 0xFF);
	}

	if ((h_s_tlv >> 24) == CFM_PORT_STATUS_TLV_TYPE) {
		/* Port status TLV */
		peer_mep->cc_status.tlv_seen = true;
		peer_mep->cc_status.port_tlv_value = (h_s_tlv & 0xFF);
	}

	/* The Sender ID TLV is not handled */
	/* The Organization-Specific TLV is not handled */

	/* Return the length of this TLV:
	 * the length of the value field plus 3 bytes for the type field and
	 * the length field.
	 */
	return ((h_s_tlv >> 8) & 0xFFFF) + 3;
}
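
/* Example: the port and interface status TLVs built by ccm_frame_build()
 * have a value length of 1, so ccm_tlv_extract() returns 1 + 3 = 4 for them
 * and br_cfm_frame_rx() advances 4 bytes to the next TLV. The caller
 * additionally bounds the walk to four TLVs.
 */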

/* note: already called with rcu_read_lock */
static int br_cfm_frame_rx(struct net_bridge_port *port, struct sk_buff *skb)
{
	u32 mdlevel, interval, size, index, max;
	const struct br_cfm_common_hdr *hdr;
	struct br_cfm_peer_mep *peer_mep;
	const struct br_cfm_maid *maid;
	struct br_cfm_common_hdr _hdr;
	struct br_cfm_maid _maid;
	struct br_cfm_mep *mep;
	struct net_bridge *br;
	__be32 *snumber;
	__be32 _snumber;
	__be16 *mepid;
	__be16 _mepid;

	if (port->state == BR_STATE_DISABLED)
		return 0;

	hdr = skb_header_pointer(skb, 0, sizeof(_hdr), &_hdr);
	if (!hdr)
		return 1;

	br = port->br;
	mep = br_mep_find_ifindex(br, port->dev->ifindex);
	if (unlikely(!mep))
		/* No MEP on this port - must be forwarded */
		return 0;

	mdlevel = hdr->mdlevel_version >> 5;
	if (mdlevel > mep->config.mdlevel)
		/* The level is above this MEP level - must be forwarded */
		return 0;

	if ((hdr->mdlevel_version & 0x1F) != 0) {
		/* Invalid version */
		mep->status.version_unexp_seen = true;
		return 1;
	}

	if (mdlevel < mep->config.mdlevel) {
		/* The level is below this MEP level */
		mep->status.rx_level_low_seen = true;
		return 1;
	}

	if (hdr->opcode == BR_CFM_OPCODE_CCM) {
		/* CCM PDU received. */
		/* MA ID is after common header + sequence number + MEP ID */
		maid = skb_header_pointer(skb,
					  CFM_CCM_PDU_MAID_OFFSET,
					  sizeof(_maid), &_maid);
		if (!maid)
			return 1;
		if (memcmp(maid->data, mep->cc_config.exp_maid.data,
			   sizeof(maid->data)))
			/* MA ID not as expected */
			return 1;

		/* MEP ID is after common header + sequence number */
		mepid = skb_header_pointer(skb,
					   CFM_CCM_PDU_MEPID_OFFSET,
					   sizeof(_mepid), &_mepid);
		if (!mepid)
			return 1;
		peer_mep = br_peer_mep_find(mep, (u32)ntohs(*mepid));
		if (!peer_mep)
			return 1;

		/* Interval is in common header flags */
		interval = hdr->flags & 0x07;
		if (mep->cc_config.exp_interval != pdu_to_interval(interval))
			/* Interval not as expected */
			return 1;

		/* A valid CCM frame is received */
		if (peer_mep->cc_status.ccm_defect) {
			peer_mep->cc_status.ccm_defect = false;

			/* Change in CCM defect status - notify */
			br_cfm_notify(RTM_NEWLINK, port);

			/* Start CCM RX timer */
			ccm_rx_timer_start(peer_mep);
		}

		peer_mep->cc_status.seen = true;
		peer_mep->ccm_rx_count_miss = 0;

		/* RDI is in common header flags */
		peer_mep->cc_status.rdi = (hdr->flags & 0x80) ? true : false;

		/* Sequence number is after common header */
		snumber = skb_header_pointer(skb,
					     CFM_CCM_PDU_SEQNR_OFFSET,
					     sizeof(_snumber), &_snumber);
		if (!snumber)
			return 1;
		if (ntohl(*snumber) != (mep->ccm_rx_snumber + 1))
			/* Unexpected sequence number */
			peer_mep->cc_status.seq_unexp_seen = true;

		mep->ccm_rx_snumber = ntohl(*snumber);

		/* TLVs start after common header + sequence number + MEP ID +
		 * MA ID + ITU reserved
		 */
		index = CFM_CCM_PDU_TLV_OFFSET;
		max = 0;
		do { /* Handle all TLVs */
			size = ccm_tlv_extract(skb, index, peer_mep);
			index += size;
			max += 1;
		} while (size != 0 && max < 4); /* Max four TLVs possible */

		return 1;
	}

	mep->status.opcode_unexp_seen = true;

	return 1;
}

static struct br_frame_type cfm_frame_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CFM),
	.frame_handler = br_cfm_frame_rx,
};
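
/* br_cfm_frame_rx() is hooked into the bridge RX path through this frame
 * type: br_add_frame() registers it when the first MEP is created and
 * br_del_frame() removes it again when the last MEP is deleted, so
 * ETH_P_CFM frames are only intercepted while at least one MEP exists on
 * the bridge.
 */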

int br_cfm_mep_create(struct net_bridge *br,
		      const u32 instance,
		      struct br_cfm_mep_create *const create,
		      struct netlink_ext_ack *extack)
{
	struct net_bridge_port *p;
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	if (create->domain == BR_CFM_VLAN) {
		NL_SET_ERR_MSG_MOD(extack,
				   "VLAN domain not supported");
		return -EINVAL;
	}
	if (create->domain != BR_CFM_PORT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid domain value");
		return -EINVAL;
	}
	if (create->direction == BR_CFM_MEP_DIRECTION_UP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Up-MEP not supported");
		return -EINVAL;
	}
	if (create->direction != BR_CFM_MEP_DIRECTION_DOWN) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid direction value");
		return -EINVAL;
	}
	p = br_mep_get_port(br, create->ifindex);
	if (!p) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port is not related to bridge");
		return -EINVAL;
	}
	mep = br_mep_find(br, instance);
	if (mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance already exists");
		return -EEXIST;
	}

	/* In PORT domain only one instance can be created per port */
	if (create->domain == BR_CFM_PORT) {
		mep = br_mep_find_ifindex(br, create->ifindex);
		if (mep) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one Port MEP on a port allowed");
			return -EINVAL;
		}
	}

	mep = kzalloc(sizeof(*mep), GFP_KERNEL);
	if (!mep)
		return -ENOMEM;

	mep->create = *create;
	mep->instance = instance;
	rcu_assign_pointer(mep->b_port, p);

	INIT_HLIST_HEAD(&mep->peer_mep_list);
	INIT_DELAYED_WORK(&mep->ccm_tx_dwork, ccm_tx_work_expired);

	if (hlist_empty(&br->mep_list))
		br_add_frame(br, &cfm_frame_type);

	hlist_add_tail_rcu(&mep->head, &br->mep_list);

	return 0;
}
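
/* Tear down a MEP: stop all peer MEP RX timers and free the peer entries,
 * stop CCM transmission, clear the port reference and unlink the MEP from
 * the bridge list. The RCU-deferred freeing lets a concurrent
 * br_cfm_frame_rx(), which runs under rcu_read_lock(), finish safely
 * before the memory is released.
 */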
static void mep_delete_implementation(struct net_bridge *br,
				      struct br_cfm_mep *mep)
{
	struct br_cfm_peer_mep *peer_mep;
	struct hlist_node *n_store;

	ASSERT_RTNL();

	/* Empty and free peer MEP list */
	hlist_for_each_entry_safe(peer_mep, n_store, &mep->peer_mep_list, head) {
		cancel_delayed_work_sync(&peer_mep->ccm_rx_dwork);
		hlist_del_rcu(&peer_mep->head);
		kfree_rcu(peer_mep, rcu);
	}

	cancel_delayed_work_sync(&mep->ccm_tx_dwork);

	RCU_INIT_POINTER(mep->b_port, NULL);
	hlist_del_rcu(&mep->head);
	kfree_rcu(mep, rcu);

	if (hlist_empty(&br->mep_list))
		br_del_frame(br, &cfm_frame_type);
}

int br_cfm_mep_delete(struct net_bridge *br,
		      const u32 instance,
		      struct netlink_ext_ack *extack)
{
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	mep = br_mep_find(br, instance);
	if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
		return -ENOENT;
	}

	mep_delete_implementation(br, mep);

	return 0;
}

int br_cfm_mep_config_set(struct net_bridge *br,
			  const u32 instance,
			  const struct br_cfm_mep_config *const config,
			  struct netlink_ext_ack *extack)
{
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	mep = br_mep_find(br, instance);
	if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
		return -ENOENT;
	}

	mep->config = *config;

	return 0;
}

int br_cfm_cc_config_set(struct net_bridge *br,
			 const u32 instance,
			 const struct br_cfm_cc_config *const config,
			 struct netlink_ext_ack *extack)
{
	struct br_cfm_peer_mep *peer_mep;
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	mep = br_mep_find(br, instance);
	if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
		return -ENOENT;
	}

	/* Check for no change in configuration */
	if (memcmp(config, &mep->cc_config, sizeof(*config)) == 0)
		return 0;

	if (config->enable && !mep->cc_config.enable)
		/* CC is enabled */
		hlist_for_each_entry(peer_mep, &mep->peer_mep_list, head)
			cc_peer_enable(peer_mep);

	if (!config->enable && mep->cc_config.enable)
		/* CC is disabled */
		hlist_for_each_entry(peer_mep, &mep->peer_mep_list, head)
			cc_peer_disable(peer_mep);

	mep->cc_config = *config;
	mep->ccm_rx_snumber = 0;
	mep->ccm_tx_snumber = 1;

	return 0;
}

int br_cfm_cc_peer_mep_add(struct net_bridge *br, const u32 instance,
			   u32 mepid,
			   struct netlink_ext_ack *extack)
{
	struct br_cfm_peer_mep *peer_mep;
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	mep = br_mep_find(br, instance);
	if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
		return -ENOENT;
	}

	peer_mep = br_peer_mep_find(mep, mepid);
	if (peer_mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Peer MEP-ID already exists");
		return -EEXIST;
	}

	peer_mep = kzalloc(sizeof(*peer_mep), GFP_KERNEL);
	if (!peer_mep)
		return -ENOMEM;

	peer_mep->mepid = mepid;
	peer_mep->mep = mep;
	INIT_DELAYED_WORK(&peer_mep->ccm_rx_dwork, ccm_rx_work_expired);

	if (mep->cc_config.enable)
		cc_peer_enable(peer_mep);

	hlist_add_tail_rcu(&peer_mep->head, &mep->peer_mep_list);

	return 0;
}

int br_cfm_cc_peer_mep_remove(struct net_bridge *br, const u32 instance,
			      u32 mepid,
			      struct netlink_ext_ack *extack)
{
	struct br_cfm_peer_mep *peer_mep;
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	mep = br_mep_find(br, instance);
	if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
		return -ENOENT;
	}

	peer_mep = br_peer_mep_find(mep, mepid);
	if (!peer_mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Peer MEP-ID does not exist");
		return -ENOENT;
	}

	cc_peer_disable(peer_mep);

	hlist_del_rcu(&peer_mep->head);
	kfree_rcu(peer_mep, rcu);

	return 0;
}

int br_cfm_cc_rdi_set(struct net_bridge *br, const u32 instance,
		      const bool rdi, struct netlink_ext_ack *extack)
{
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	mep = br_mep_find(br, instance);
	if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
		return -ENOENT;
	}

	mep->rdi = rdi;

	return 0;
}

int br_cfm_cc_ccm_tx(struct net_bridge *br, const u32 instance,
		     const struct br_cfm_cc_ccm_tx_info *const tx_info,
		     struct netlink_ext_ack *extack)
{
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	mep = br_mep_find(br, instance);
	if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
		return -ENOENT;
	}

	if (memcmp(tx_info, &mep->cc_ccm_tx_info, sizeof(*tx_info)) == 0) {
		/* No change in tx_info. */
		if (mep->cc_ccm_tx_info.period == 0)
			/* Transmission is not enabled - just return */
			return 0;

		/* Transmission is ongoing, the end time is recalculated */
		mep->ccm_tx_end = jiffies +
				  usecs_to_jiffies(tx_info->period * 1000000);
		return 0;
	}

	if (tx_info->period == 0 && mep->cc_ccm_tx_info.period == 0)
		/* Some change in info and transmission is not ongoing */
		goto save;

	if (tx_info->period != 0 && mep->cc_ccm_tx_info.period != 0) {
		/* Some change in info and transmission is ongoing
		 * The end time is recalculated
		 */
		mep->ccm_tx_end = jiffies +
				  usecs_to_jiffies(tx_info->period * 1000000);

		goto save;
	}

	if (tx_info->period == 0 && mep->cc_ccm_tx_info.period != 0) {
		cancel_delayed_work_sync(&mep->ccm_tx_dwork);
		goto save;
	}

	/* Start delayed work to transmit CCM frames. It is done with zero delay
	 * to send the first frame immediately
	 */
	mep->ccm_tx_end = jiffies + usecs_to_jiffies(tx_info->period * 1000000);
	queue_delayed_work(system_wq, &mep->ccm_tx_dwork, 0);

save:
	mep->cc_ccm_tx_info = *tx_info;

	return 0;
}
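
/* Note on units: tx_info->period is treated here as a number of seconds;
 * the period * 1000000 conversions above turn it into the microsecond
 * value expected by usecs_to_jiffies(). For example, a period of 10 keeps
 * CCM transmission running for roughly 10 seconds from now, after which
 * ccm_tx_work_expired() stops rescheduling itself unless a new
 * br_cfm_cc_ccm_tx() call has moved ccm_tx_end further out.
 */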

int br_cfm_mep_count(struct net_bridge *br, u32 *count)
{
	struct br_cfm_mep *mep;

	*count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mep, &br->mep_list, head)
		*count += 1;
	rcu_read_unlock();

	return 0;
}

int br_cfm_peer_mep_count(struct net_bridge *br, u32 *count)
{
	struct br_cfm_peer_mep *peer_mep;
	struct br_cfm_mep *mep;

	*count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mep, &br->mep_list, head)
		hlist_for_each_entry_rcu(peer_mep, &mep->peer_mep_list, head)
			*count += 1;
	rcu_read_unlock();

	return 0;
}

bool br_cfm_created(struct net_bridge *br)
{
	return !hlist_empty(&br->mep_list);
}

/* Deletes the CFM instances on a specific bridge port */
void br_cfm_port_del(struct net_bridge *br, struct net_bridge_port *port)
{
	struct hlist_node *n_store;
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	hlist_for_each_entry_safe(mep, n_store, &br->mep_list, head)
		if (mep->create.ifindex == port->dev->ifindex)
			mep_delete_implementation(br, mep);
}