/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/rcupdate.h>

#include "ipath_verbs.h"

/*
 * Global table of GID to attached QPs.
 * The table is global to all ipath devices since a send from one QP/device
 * needs to be locally routed to any locally attached QPs on the same
 * or different device.
 */
static struct rb_root mcast_tree;
static DEFINE_SPINLOCK(mcast_lock);
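
/*
 * Locking: mcast_lock serializes all updates to the tree and to each
 * group's qp_list.  Lockless readers traverse a qp_list with the RCU
 * list primitives, so entries are only unlinked with list_del_rcu()
 * and the per-group refcount/wait queue is used to wait readers out
 * before anything is freed.
 */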

/**
 * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 */
static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
{
        struct ipath_mcast_qp *mqp;

        mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
        if (!mqp)
                goto bail;

        mqp->qp = qp;
        /* Hold a QP reference while attached; dropped in ipath_mcast_qp_free(). */
        atomic_inc(&qp->refcount);

bail:
        return mqp;
}

static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
{
        struct ipath_qp *qp = mqp->qp;

        /* Notify ipath_destroy_qp() if it is waiting. */
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);

        kfree(mqp);
}

/**
 * ipath_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 */
static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
{
        struct ipath_mcast *mcast;

        mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
        if (!mcast)
                goto bail;

        mcast->mgid = *mgid;
        INIT_LIST_HEAD(&mcast->qp_list);
        init_waitqueue_head(&mcast->wait);
        atomic_set(&mcast->refcount, 0);
        mcast->n_attached = 0;

bail:
        return mcast;
}

static void ipath_mcast_free(struct ipath_mcast *mcast)
{
        struct ipath_mcast_qp *p, *tmp;

        list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
                ipath_mcast_qp_free(p);

        kfree(mcast);
}

/**
 * ipath_mcast_find - search the global table for the given multicast GID
 * @mgid: the multicast GID to search for
 *
 * Returns NULL if not found.
 *
 * The caller is responsible for decrementing the reference count if found.
 */
struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
{
        struct rb_node *n;
        unsigned long flags;
        struct ipath_mcast *mcast;

        spin_lock_irqsave(&mcast_lock, flags);
        n = mcast_tree.rb_node;
        while (n) {
                int ret;

                mcast = rb_entry(n, struct ipath_mcast, rb_node);

                ret = memcmp(mgid->raw, mcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else {
                        atomic_inc(&mcast->refcount);
                        spin_unlock_irqrestore(&mcast_lock, flags);
                        goto bail;
                }
        }
        spin_unlock_irqrestore(&mcast_lock, flags);

        mcast = NULL;

bail:
        return mcast;
}
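
/*
 * For reference, the lookup side lives outside this file.  A sketch of
 * the pattern used by the multicast receive path (cf. ipath_ib_rcv()
 * in ipath_verbs.c; `grh` and the delivery step are placeholders here):
 *
 *      mcast = ipath_mcast_find(&grh->dgid);
 *      if (mcast != NULL) {
 *              list_for_each_entry_rcu(p, &mcast->qp_list, list)
 *                      deliver_to(p->qp);
 *              if (atomic_dec_return(&mcast->refcount) <= 1)
 *                      wake_up(&mcast->wait);
 *      }
 *
 * The final decrement pairs with the wait_event() calls in
 * ipath_multicast_detach() below.
 */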

/**
 * ipath_mcast_add - insert mcast GID into table and attach QP struct
 * @dev: the device the QP is attached on
 * @mcast: the new mcast GID struct to insert if not already in the table
 * @mqp: the QP to attach
 *
 * Return zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.  Return ENOMEM if either the
 * per-group limit on attached QPs or the per-device limit on mcast groups
 * has been reached.  These are positive local codes; the caller maps them
 * to the negative errno values returned to the core.
 */
static int ipath_mcast_add(struct ipath_ibdev *dev,
                           struct ipath_mcast *mcast,
                           struct ipath_mcast_qp *mqp)
{
        struct rb_node **n = &mcast_tree.rb_node;
        struct rb_node *pn = NULL;
        int ret;

        spin_lock_irq(&mcast_lock);

        while (*n) {
                struct ipath_mcast *tmcast;
                struct ipath_mcast_qp *p;

                pn = *n;
                tmcast = rb_entry(pn, struct ipath_mcast, rb_node);

                ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0) {
                        n = &pn->rb_left;
                        continue;
                }
                if (ret > 0) {
                        n = &pn->rb_right;
                        continue;
                }

                /* Search the QP list to see if this is already there. */
                list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
                        if (p->qp == mqp->qp) {
                                ret = ESRCH;
                                goto bail;
                        }
                }
                if (tmcast->n_attached == ib_ipath_max_mcast_qp_attached) {
                        ret = ENOMEM;
                        goto bail;
                }

                tmcast->n_attached++;

                list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
                ret = EEXIST;
                goto bail;
        }

        spin_lock(&dev->n_mcast_grps_lock);
        if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
                spin_unlock(&dev->n_mcast_grps_lock);
                ret = ENOMEM;
                goto bail;
        }

        dev->n_mcast_grps_allocated++;
        spin_unlock(&dev->n_mcast_grps_lock);

        mcast->n_attached++;

        list_add_tail_rcu(&mqp->list, &mcast->qp_list);

        /* The mcast tree holds a reference on the group. */
        atomic_inc(&mcast->refcount);
        rb_link_node(&mcast->rb_node, pn, n);
        rb_insert_color(&mcast->rb_node, &mcast_tree);

        ret = 0;

bail:
        spin_unlock_irq(&mcast_lock);

        return ret;
}
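
/*
 * ipath_multicast_attach - attach a QP to a multicast group
 *
 * This and ipath_multicast_detach() below are the driver's multicast
 * attach/detach methods registered with the core verbs layer (see
 * ipath_verbs.c); the lid argument is supplied by the core but is not
 * needed here.
 */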
int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        struct ipath_mcast *mcast;
        struct ipath_mcast_qp *mqp;
        int ret;

        /*
         * Allocate data structures since it's better to do this outside of
         * spin locks and they will most likely be needed.
         */
        mcast = ipath_mcast_alloc(gid);
        if (mcast == NULL) {
                ret = -ENOMEM;
                goto bail;
        }
        mqp = ipath_mcast_qp_alloc(qp);
        if (mqp == NULL) {
                ipath_mcast_free(mcast);
                ret = -ENOMEM;
                goto bail;
        }
        switch (ipath_mcast_add(dev, mcast, mqp)) {
        case ESRCH:
                /* Neither was used: can't attach the same QP twice. */
                ipath_mcast_qp_free(mqp);
                ipath_mcast_free(mcast);
                ret = -EINVAL;
                goto bail;
        case EEXIST:            /* The mcast wasn't used */
                ipath_mcast_free(mcast);
                break;
        case ENOMEM:
                /* Exceeded the maximum number of mcast QPs or groups. */
                ipath_mcast_qp_free(mqp);
                ipath_mcast_free(mcast);
                ret = -ENOMEM;
                goto bail;
        default:
                break;
        }

        ret = 0;

bail:
        return ret;
}
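
/*
 * ipath_multicast_detach - detach a QP from a multicast group
 *
 * Undoes ipath_multicast_attach(): unlinks the QP from the group's list
 * and, if it was the last QP attached, removes the group from the table.
 * The wait_event() calls below sleep until concurrent list walkers have
 * dropped their references before the structures are freed.
 */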
int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        struct ipath_mcast *mcast = NULL;
        struct ipath_mcast_qp *p, *tmp, *delp = NULL;
        struct rb_node *n;
        int last = 0;
        int ret;

        spin_lock_irq(&mcast_lock);

        /* Find the GID in the mcast table. */
        n = mcast_tree.rb_node;
        while (1) {
                if (n == NULL) {
                        spin_unlock_irq(&mcast_lock);
                        ret = -EINVAL;
                        goto bail;
                }

                mcast = rb_entry(n, struct ipath_mcast, rb_node);
                ret = memcmp(gid->raw, mcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        break;
        }

        /* Search the QP list. */
        list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
                if (p->qp != qp)
                        continue;
                /*
                 * We found it, so remove it, but don't poison the forward
                 * link until we are sure there are no list walkers.
                 */
                list_del_rcu(&p->list);
                mcast->n_attached--;
                delp = p;

                /* If this was the last attached QP, remove the GID too. */
                if (list_empty(&mcast->qp_list)) {
                        rb_erase(&mcast->rb_node, &mcast_tree);
                        last = 1;
                }
                break;
        }

        spin_unlock_irq(&mcast_lock);

        /* QP was not attached to this GID. */
        if (!delp) {
                ret = -EINVAL;
                goto bail;
        }

        /*
         * Wait for any list walkers to finish before freeing the
         * list element.
         */
        wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
        ipath_mcast_qp_free(delp);

        if (last) {
                /* Drop the tree's reference and wait out all readers. */
                atomic_dec(&mcast->refcount);
                wait_event(mcast->wait, !atomic_read(&mcast->refcount));
                ipath_mcast_free(mcast);
                spin_lock_irq(&dev->n_mcast_grps_lock);
                dev->n_mcast_grps_allocated--;
                spin_unlock_irq(&dev->n_mcast_grps_lock);
        }

        ret = 0;

bail:
        return ret;
}
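
/*
 * ipath_mcast_tree_empty - report whether any multicast groups remain
 *
 * No locking: intended for use once everything is quiesced, e.g. as a
 * leak check when the driver is being unregistered (cf. ipath_verbs.c).
 */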
int ipath_mcast_tree_empty(void)
{
        return mcast_tree.rb_node == NULL;
}