/*
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/random.h>

#include <rdma/ib_cache.h>
#include "sa.h"

static void mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device);

static struct ib_client mcast_client = {
	.name   = "ib_multicast",
	.add    = mcast_add_one,
	.remove = mcast_remove_one
};

static struct ib_sa_client	sa_client;
static struct workqueue_struct	*mcast_wq;
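/* Joining with MGID 0 asks the SA to create the group and assign an MGID. */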
static union ib_gid mgid0;

struct mcast_device;

struct mcast_port {
	struct mcast_device	*dev;
	spinlock_t		lock;
	struct rb_root		table;
	atomic_t		refcount;
	struct completion	comp;
	u8			port_num;
};

struct mcast_device {
	struct ib_device	*device;
	struct ib_event_handler	event_handler;
	int			start_port;
	int			end_port;
	struct mcast_port	port[0];
};

enum mcast_state {
	MCAST_IDLE,
	MCAST_JOINING,
	MCAST_MEMBER,
	MCAST_BUSY,
	MCAST_ERROR
};

struct mcast_member;

struct mcast_group {
	struct ib_sa_mcmember_rec rec;
	struct rb_node		node;
	struct mcast_port	*port;
	spinlock_t		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	struct list_head	active_list;
	struct mcast_member	*last_join;
	int			members[3];
	atomic_t		refcount;
	enum mcast_state	state;
	struct ib_sa_query	*query;
	int			query_id;
};

struct mcast_member {
	struct ib_sa_multicast	multicast;
	struct ib_sa_client	*client;
	struct mcast_group	*group;
	struct list_head	list;
	enum mcast_state	state;
	atomic_t		refcount;
	struct completion	comp;
};

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context);
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context);

static struct mcast_group *mcast_find(struct mcast_port *port,
				      union ib_gid *mgid)
{
	struct rb_node *node = port->table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mcast_port *port,
					struct mcast_group *group,
					int allow_duplicates)
{
	struct rb_node **link = &port->table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else if (allow_duplicates)
			link = &(*link)->rb_left;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &port->table);
	return NULL;
}

static void deref_port(struct mcast_port *port)
{
	if (atomic_dec_and_test(&port->refcount))
		complete(&port->comp);
}

static void release_group(struct mcast_group *group)
{
	struct mcast_port *port = group->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	if (atomic_dec_and_test(&group->refcount)) {
		rb_erase(&group->node, &port->table);
		spin_unlock_irqrestore(&port->lock, flags);
		kfree(group);
		deref_port(port);
	} else
		spin_unlock_irqrestore(&port->lock, flags);
}

static void deref_member(struct mcast_member *member)
{
	if (atomic_dec_and_test(&member->refcount))
		complete(&member->comp);
}

static void queue_join(struct mcast_member *member)
{
	struct mcast_group *group = member->group;
	unsigned long flags;

	spin_lock_irqsave(&group->lock, flags);
	list_add(&member->list, &group->pending_list);
	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		atomic_inc(&group->refcount);
		queue_work(mcast_wq, &group->work);
	}
	spin_unlock_irqrestore(&group->lock, flags);
}

/*
 * A multicast group has three types of members: full member, non member, and
 * send only member.  We need to keep track of the number of members of each
 * type based on their join state.  Adjust the number of members that belong
 * to the specified join states.
 */
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}
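
/*
 * Example: join_state packs the three membership types into its low three
 * bits (0x1 full member, 0x2 non member, 0x4 send-only member), matching
 * the members[] array indices above.  adjust_membership(group, 0x5, 1)
 * therefore increments members[0] and members[2] for a user joining as
 * both a full member and a send-only member.
 */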

/*
 * If a multicast group has zero members left for a particular join state, but
 * the group is still joined with the SA for that state, we need to leave it.
 * Determine which join states we still belong to, but that no longer have any
 * active members.
 */
static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (0x1 << i);

	return leave_state & group->rec.join_state;
}
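
/*
 * Worked example: if group->rec.join_state is 0x3 (full + non member) and
 * members[] is { 1, 0, 0 }, only the non member state has no remaining
 * users, so get_leave_state() returns 0x2 and the group leaves just that
 * join state with the SA, keeping its full membership.
 */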

static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 selector, u8 src_value, u8 dst_value)
{
	int err;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}
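
/*
 * Usage sketch (illustrative): a requester uses a selector to bound the
 * value the SA may return, and check_selector() reports nonzero when the
 * group's value violates that bound.  For instance, in cmp_rec() below:
 *
 *	check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
 *		       IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
 *		       src->mtu, dst->mtu)
 *
 * fails when dst requested IB_SA_GT but the group's MTU (src->mtu) is not
 * strictly greater than the requested dst->mtu.
 */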

static int cmp_rec(struct ib_sa_mcmember_rec *src,
		   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
{
	/* MGID must already match */

	if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
	    memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
			   src->mtu, dst->mtu))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->traffic_class != dst->traffic_class)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE, dst->rate_selector,
			   src->rate, dst->rate))
		return -EINVAL;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   dst->packet_life_time_selector,
			   src->packet_life_time, dst->packet_life_time))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    src->flow_label != dst->flow_label)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    src->hop_limit != dst->hop_limit)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
		return -EINVAL;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}

static int send_join(struct mcast_group *group, struct mcast_member *member)
{
	struct mcast_port *port = group->port;
	int ret;

	group->last_join = member;
	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_MGMT_METHOD_SET,
				       &member->multicast.rec,
				       member->multicast.comp_mask,
				       3000, GFP_KERNEL, join_handler, group,
				       &group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}

static int send_leave(struct mcast_group *group, u8 leave_state)
{
	struct mcast_port *port = group->port;
	struct ib_sa_mcmember_rec rec;
	int ret;

	rec = group->rec;
	rec.join_state = leave_state;

	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_SA_METHOD_DELETE, &rec,
				       IB_SA_MCMEMBER_REC_MGID     |
				       IB_SA_MCMEMBER_REC_PORT_GID |
				       IB_SA_MCMEMBER_REC_JOIN_STATE,
				       3000, GFP_KERNEL, leave_handler,
				       group, &group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}

static void join_group(struct mcast_group *group, struct mcast_member *member,
		       u8 join_state)
{
	member->state = MCAST_MEMBER;
	adjust_membership(group, join_state, 1);
	group->rec.join_state |= join_state;
	member->multicast.rec = group->rec;
	member->multicast.rec.join_state = join_state;
	list_move(&member->list, &group->active_list);
}

static int fail_join(struct mcast_group *group, struct mcast_member *member,
		     int status)
{
	spin_lock_irq(&group->lock);
	list_del_init(&member->list);
	spin_unlock_irq(&group->lock);
	return member->multicast.callback(status, &member->multicast);
}

static void process_group_error(struct mcast_group *group)
{
	struct mcast_member *member;
	int ret;

	spin_lock_irq(&group->lock);
	while (!list_empty(&group->active_list)) {
		member = list_entry(group->active_list.next,
				    struct mcast_member, list);
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		adjust_membership(group, member->multicast.rec.join_state, -1);
		member->state = MCAST_ERROR;
		spin_unlock_irq(&group->lock);

		ret = member->multicast.callback(-ENETRESET,
						 &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	group->rec.join_state = 0;
	group->state = MCAST_BUSY;
	spin_unlock_irq(&group->lock);
}

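/*
 * Group state machine: drain the pending list, matching each request against
 * the group's current SA membership.  At most one SA join or leave is issued
 * at a time; the join/leave handlers re-enter this function to resume.
 */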
static void mcast_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int status, ret;
	u8 join_state;

	group = container_of(work, typeof(*group), work);
retest:
	spin_lock_irq(&group->lock);
	while (!list_empty(&group->pending_list) ||
	       (group->state == MCAST_ERROR)) {

		if (group->state == MCAST_ERROR) {
			spin_unlock_irq(&group->lock);
			process_group_error(group);
			goto retest;
		}

		member = list_entry(group->pending_list.next,
				    struct mcast_member, list);
		multicast = &member->multicast;
		join_state = multicast->rec.join_state;
		atomic_inc(&member->refcount);

		if (join_state == (group->rec.join_state & join_state)) {
			status = cmp_rec(&group->rec, &multicast->rec,
					 multicast->comp_mask);
			if (!status)
				join_group(group, member, join_state);
			else
				list_del_init(&member->list);
			spin_unlock_irq(&group->lock);
			ret = multicast->callback(status, multicast);
		} else {
			spin_unlock_irq(&group->lock);
			status = send_join(group, member);
			if (!status) {
				deref_member(member);
				return;
			}
			ret = fail_join(group, member, status);
		}

		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	join_state = get_leave_state(group);
	if (join_state) {
		group->rec.join_state &= ~join_state;
		spin_unlock_irq(&group->lock);
		if (send_leave(group, join_state))
			goto retest;
	} else {
		group->state = MCAST_IDLE;
		spin_unlock_irq(&group->lock);
		release_group(group);
	}
}

/*
 * Fail a join request if it is still active, i.e. still at the head of the
 * pending queue.
 */
static void process_join_error(struct mcast_group *group, int status)
{
	struct mcast_member *member;
	int ret;

	spin_lock_irq(&group->lock);
	member = list_entry(group->pending_list.next,
			    struct mcast_member, list);
	if (group->last_join == member) {
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		spin_unlock_irq(&group->lock);
		ret = member->multicast.callback(status, &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
	} else
		spin_unlock_irq(&group->lock);
}

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context)
{
	struct mcast_group *group = context;

	if (status)
		process_join_error(group, status);
	else {
		int mgids_changed, is_mgid0;

		spin_lock_irq(&group->port->lock);
		mgids_changed = memcmp(&rec->mgid, &group->rec.mgid,
				       sizeof group->rec.mgid);
		group->rec = *rec;
		if (mgids_changed) {
			/*
			 * The SA assigned an MGID different from the key the
			 * group was inserted under (e.g. a wildcard MGID 0
			 * join), so re-key the group in the port's tree.
			 */
			rb_erase(&group->node, &group->port->table);
			is_mgid0 = !memcmp(&mgid0, &group->rec.mgid,
					   sizeof mgid0);
			mcast_insert(group->port, group, is_mgid0);
		}
		spin_unlock_irq(&group->port->lock);
	}
	mcast_work_handler(&group->work);
}

static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context)
{
	struct mcast_group *group = context;

	mcast_work_handler(&group->work);
}

static struct mcast_group *acquire_group(struct mcast_port *port,
					 union ib_gid *mgid, gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	unsigned long flags;
	int is_mgid0;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		spin_lock_irqsave(&port->lock, flags);
		group = mcast_find(port, mgid);
		if (group)
			goto found;
		spin_unlock_irqrestore(&port->lock, flags);
	}

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return NULL;

	group->port = port;
	group->rec.mgid = *mgid;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->active_list);
	INIT_WORK(&group->work, mcast_work_handler);
	spin_lock_init(&group->lock);

	spin_lock_irqsave(&port->lock, flags);
	cur_group = mcast_insert(port, group, is_mgid0);
	if (cur_group) {
		kfree(group);
		group = cur_group;
	} else
		atomic_inc(&port->refcount);
found:
	atomic_inc(&group->refcount);
	spin_unlock_irqrestore(&port->lock, flags);
	return group;
}

/*
 * We serialize all join requests to a single group to make our lives much
 * easier.  Otherwise, two users could try to join the same group
 * simultaneously, with different configurations, one could leave while the
 * join is in progress, etc., which makes locking around error recovery
 * difficult.
 */
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
		     struct ib_device *device, u8 port_num,
		     struct ib_sa_mcmember_rec *rec,
		     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
		     int (*callback)(int status,
				     struct ib_sa_multicast *multicast),
		     void *context)
{
	struct mcast_device *dev;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int ret;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return ERR_PTR(-ENODEV);

	member = kmalloc(sizeof *member, gfp_mask);
	if (!member)
		return ERR_PTR(-ENOMEM);

	ib_sa_client_get(client);
	member->client = client;
	member->multicast.rec = *rec;
	member->multicast.comp_mask = comp_mask;
	member->multicast.callback = callback;
	member->multicast.context = context;
	init_completion(&member->comp);
	atomic_set(&member->refcount, 1);
	member->state = MCAST_JOINING;

	member->group = acquire_group(&dev->port[port_num - dev->start_port],
				      &rec->mgid, gfp_mask);
	if (!member->group) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * The user will get the multicast structure in their callback.  They
	 * could then free the multicast structure before we can return from
	 * this routine.  So we save the pointer to return before queuing
	 * any callback.
	 */
	multicast = &member->multicast;
	queue_join(member);
	return multicast;

err:
	ib_sa_client_put(client);
	kfree(member);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_join_multicast);
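
/*
 * Usage sketch (illustrative; "my_join_done", "my_sa_client", and
 * "my_context" are hypothetical caller-side names):
 *
 *	static int my_join_done(int status, struct ib_sa_multicast *mc)
 *	{
 *		// A nonzero return tells the core to free mc for us.
 *		if (status)
 *			return status;
 *		// mc->rec now holds the parameters assigned by the SA.
 *		return 0;
 *	}
 *
 *	mc = ib_sa_join_multicast(&my_sa_client, device, port_num, &rec,
 *				  IB_SA_MCMEMBER_REC_MGID |
 *				  IB_SA_MCMEMBER_REC_PORT_GID |
 *				  IB_SA_MCMEMBER_REC_JOIN_STATE,
 *				  GFP_KERNEL, my_join_done, my_context);
 *	if (IS_ERR(mc))
 *		return PTR_ERR(mc);
 *
 * The join completes asynchronously; when the caller is done, a single call
 * to ib_sa_free_multicast(mc) drops the membership.
 */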

void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
{
	struct mcast_member *member;
	struct mcast_group *group;

	member = container_of(multicast, struct mcast_member, multicast);
	group = member->group;

	spin_lock_irq(&group->lock);
	if (member->state == MCAST_MEMBER)
		adjust_membership(group, multicast->rec.join_state, -1);

	list_del_init(&member->list);

	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		spin_unlock_irq(&group->lock);
		/* Continue to hold reference on group until callback */
		queue_work(mcast_wq, &group->work);
	} else {
		spin_unlock_irq(&group->lock);
		release_group(group);
	}

	deref_member(member);
	wait_for_completion(&member->comp);
	ib_sa_client_put(member->client);
	kfree(member);
}
EXPORT_SYMBOL(ib_sa_free_multicast);

int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
			   union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	struct mcast_group *group;
	unsigned long flags;
	int ret = 0;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return -ENODEV;

	port = &dev->port[port_num - dev->start_port];
	spin_lock_irqsave(&port->lock, flags);
	group = mcast_find(port, mgid);
	if (group)
		*rec = group->rec;
	else
		ret = -EADDRNOTAVAIL;
	spin_unlock_irqrestore(&port->lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);

int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
			     struct ib_sa_mcmember_rec *rec,
			     struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	u8 p;

	ret = ib_find_cached_gid(device, &rec->port_gid, &p, &gid_index);
	if (ret)
		return ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->mlid);
	ah_attr->sl = rec->sl;
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	ah_attr->ah_flags = IB_AH_GRH;
	ah_attr->grh.dgid = rec->mgid;

	ah_attr->grh.sgid_index = (u8) gid_index;
	ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
	ah_attr->grh.hop_limit = rec->hop_limit;
	ah_attr->grh.traffic_class = rec->traffic_class;

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);
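
/*
 * Usage sketch (illustrative; assumes the caller already holds a PD and a
 * UD QP, named "pd" and "qp" here):
 *
 *	struct ib_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *
 *	if (!ib_init_ah_from_mcmember(device, port_num, &mc->rec, &ah_attr)) {
 *		ah = ib_create_ah(pd, &ah_attr);
 *		if (!IS_ERR(ah))
 *			ib_attach_mcast(qp, &mc->rec.mgid,
 *					be16_to_cpu(mc->rec.mlid));
 *	}
 */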

static void mcast_groups_lost(struct mcast_port *port)
{
	struct mcast_group *group;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	for (node = rb_first(&port->table); node; node = rb_next(node)) {
		group = rb_entry(node, struct mcast_group, node);
		spin_lock(&group->lock);
		if (group->state == MCAST_IDLE) {
			atomic_inc(&group->refcount);
			queue_work(mcast_wq, &group->work);
		}
		group->state = MCAST_ERROR;
		spin_unlock(&group->lock);
	}
	spin_unlock_irqrestore(&port->lock, flags);
}

static void mcast_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	struct mcast_device *dev;

	dev = container_of(handler, struct mcast_device, event_handler);

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		mcast_groups_lost(&dev->port[event->element.port_num -
					     dev->start_port]);
		break;
	default:
		break;
	}
}

static void mcast_add_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
		      GFP_KERNEL);
	if (!dev)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		dev->start_port = dev->end_port = 0;
	else {
		dev->start_port = 1;
		dev->end_port = device->phys_port_cnt;
	}

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		port = &dev->port[i];
		port->dev = dev;
		port->port_num = dev->start_port + i;
		spin_lock_init(&port->lock);
		port->table = RB_ROOT;
		init_completion(&port->comp);
		atomic_set(&port->refcount, 1);
	}

	dev->device = device;
	ib_set_client_data(device, &mcast_client, dev);

	INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
	ib_register_event_handler(&dev->event_handler);
}

static void mcast_remove_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return;

	ib_unregister_event_handler(&dev->event_handler);
	flush_workqueue(mcast_wq);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		port = &dev->port[i];
		deref_port(port);
		wait_for_completion(&port->comp);
	}

	kfree(dev);
}

int mcast_init(void)
{
	int ret;

	mcast_wq = create_singlethread_workqueue("ib_mcast");
	if (!mcast_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);

	ret = ib_register_client(&mcast_client);
	if (ret)
		goto err;
	return 0;

err:
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
	return ret;
}

void mcast_cleanup(void)
{
	ib_unregister_client(&mcast_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
}