/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/string.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
	struct work_struct work;
	struct iwcm_id_private *cm_id;
	struct list_head list;
	struct iw_cm_event event;
	struct list_head free_list;
};

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements.  The design pre-allocates them based on the cm_id type:
 *	LISTENING IDS:	Get enough elements preallocated to handle the
 *			listen backlog.
 *	ACTIVE IDS:	4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *	PASSIVE IDS:	3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed.  cm_event_handler() returns -ENOMEM in this case.  It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method.  This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */

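/*
 * Take a pre-allocated work element off the cm_id's free list, or
 * return NULL if the free list is empty.
 */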
static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (list_empty(&cm_id_priv->work_free_list))
		return NULL;
	work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
			  free_list);
	list_del_init(&work->free_list);
	return work;
}

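/*
 * Return a work element to its owning cm_id's free list.
 */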
static void put_work(struct iwcm_work *work)
{
	list_add(&work->free_list, &work->cm_id->work_free_list);
}

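/*
 * Free every work element remaining on the cm_id's free list.
 */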
static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
	struct list_head *e, *tmp;

	list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
		kfree(list_entry(e, struct iwcm_work, free_list));
}

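/*
 * Pre-allocate 'count' work elements for the cm_id and place them on
 * its free list.  On allocation failure, free any elements already
 * allocated and return -ENOMEM.
 */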
static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
	struct iwcm_work *work;

	BUG_ON(!list_empty(&cm_id_priv->work_free_list));
	while (count--) {
		work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
		if (!work) {
			dealloc_work_entries(cm_id_priv);
			return -ENOMEM;
		}
		work->cm_id = cm_id_priv;
		INIT_LIST_HEAD(&work->list);
		put_work(work);
	}
	return 0;
}

/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to. Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
	void *p;

	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	event->private_data = p;
	return 0;
}

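/*
 * Free the cm_id's pre-allocated work elements and then the cm_id
 * structure itself.
 */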
static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
	dealloc_work_entries(cm_id_priv);
	kfree(cm_id_priv);
}

/*
 * Release a reference on cm_id. If the last reference is being
 * released, complete destroy_comp so that a thread waiting in
 * iw_destroy_cm_id can wake up, and return 1.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
	if (atomic_dec_and_test(&cm_id_priv->refcount)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		complete(&cm_id_priv->destroy_comp);
		return 1;
	}

	return 0;
}

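/*
 * Reference counting callbacks installed on the iw_cm_id.  rem_ref
 * also frees the cm_id when the last reference goes away and a
 * callback has flagged the id for destruction (IWCM_F_CALLBACK_DESTROY).
 */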
static void add_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	if (iwcm_deref_id(cm_id_priv) &&
	    test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
	}
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 struct socket *so,
				 iw_cm_handler cm_handler,
				 void *context)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	cm_id_priv->id.so = so;
	spin_lock_init(&cm_id_priv->lock);
	atomic_set(&cm_id_priv->refcount, 1);
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);

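/*
 * Move the QP to the ERROR state, abruptly terminating the connection.
 */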
static int iwcm_modify_qp_err(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	if (!qp)
		return -EINVAL;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	BUG_ON(qp == NULL);
	qp_attr.qp_state = IB_QPS_SQD;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/* Wait if we're currently in a connect or accept downcall */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;

		/* QP could be NULL for a user-mode client */
		if (cm_id_priv->qp)
			qp = cm_id_priv->qp;
		else
			ret = -EINVAL;
		break;
	case IW_CM_STATE_LISTEN:
		ret = -EINVAL;
		break;
	case IW_CM_STATE_CLOSING:
		/* remote peer closed first */
	case IW_CM_STATE_IDLE:
		/* accept or connect returned !0 */
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called disconnect before/without calling accept after
		 * connect_request event delivered.
		 */
		break;
	case IW_CM_STATE_CONN_SENT:
		/* Can only get here if wait above fails */
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp) {
		if (abrupt)
			ret = iwcm_modify_qp_err(qp);
		else
			ret = iwcm_modify_qp_sqd(qp);

		/*
		 * If both sides are disconnecting the QP could
		 * already be in ERR or SQD states
		 */
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/*
	 * Wait if we're currently in a connect or accept downcall. A
	 * listening endpoint should never block here.
	 */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_LISTEN:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* destroy the listening endpoint */
		ret = cm_id->device->iwcm->destroy_listen(cm_id);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* Abrupt close of the connection */
		(void)iwcm_modify_qp_err(cm_id_priv->qp);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called destroy before/without calling accept after
		 * receiving connection request event notification or
		 * returned non-zero from the event callback function.
		 * In either case, we must tell the provider to reject.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_id->device->iwcm->reject(cm_id, NULL, 0);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
	default:
		BUG();
		break;
	}
	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	(void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));

	destroy_cm_id(cm_id);

	wait_for_completion(&cm_id_priv->destroy_comp);

	free_cm_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, backlog);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
		cm_id_priv->state = IW_CM_STATE_LISTEN;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
		if (ret)
			cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
EXPORT_SYMBOL(iw_cm_listen);

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->reject(cm_id, private_data,
					  private_data_len);

	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
		 struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->accept(cm_id, iw_param);
	if (ret) {
		/* An error on accept precludes provider events */
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_accept);

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_destroy_cm_id will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;
	unsigned long flags;
	struct ib_qp *qp;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, 4);
	if (ret)
		return ret;

	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}

	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->connect(cm_id, iw_param);
	if (ret) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_connect);

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the new cm_id is created. The event
 * contains the new four-tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{
	unsigned long flags;
	struct iw_cm_id *cm_id;
	struct iwcm_id_private *cm_id_priv;
	int ret;

	/*
	 * The provider should never generate a connection request
	 * event with a bad status.
	 */
	BUG_ON(iw_event->status);

	/*
	 * We could be destroying the listening id. If so, ignore this
	 * upcall.
	 */
	spin_lock_irqsave(&listen_id_priv->lock, flags);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				iw_event->so,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	/* If the cm_id could not be created, ignore the request */
	if (IS_ERR(cm_id))
		goto out;

	cm_id->provider_data = iw_event->provider_data;
	cm_id->local_addr = iw_event->local_addr;
	cm_id->remote_addr = iw_event->remote_addr;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	cm_id_priv->state = IW_CM_STATE_CONN_RECV;

	ret = alloc_work_entries(cm_id_priv, 3);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}

	/* Call the client CM handler */
	ret = cm_id->cm_handler(cm_id, iw_event);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
		destroy_cm_id(cm_id);
		if (atomic_read(&cm_id_priv->refcount)==0)
			free_cm_id(cm_id_priv);
	}

out:
	if (iw_event->private_data_len)
		kfree(iw_event->private_data);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	/*
	 * We clear the CONNECT_WAIT bit here to allow the callback
	 * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
	 * from a callback handler is not allowed.
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_cm_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/*
	 * Clear the connect wait bit so a callback function calling
	 * iw_cm_disconnect will not wait and deadlock this thread
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
	if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
		cm_id_priv->id.local_addr = iw_event->local_addr;
		cm_id_priv->id.remote_addr = iw_event->remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	} else {
		/* REJECTED or RESET */
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
		cm_id_priv->state = IW_CM_STATE_IDLE;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	if (iw_event->private_data_len)
		kfree(iw_event->private_data);

	/* Wake up waiters on connect complete */
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret = 0;
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_DESTROYING:
		break;
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

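/*
 * Dispatch a single provider event to the handler for its event type.
 */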
static int process_event(struct iwcm_id_private *cm_id_priv,
			 struct iw_cm_event *iw_event)
{
	int ret = 0;

	switch (iw_event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		cm_conn_req_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		ret = cm_conn_rep_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		ret = cm_conn_est_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_DISCONNECT:
		cm_disconnect_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CLOSE:
		ret = cm_close_handler(cm_id_priv, iw_event);
		break;
	default:
		BUG();
	}

	return ret;
}

/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int empty;
	int ret = 0;
	int destroy_id;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	empty = list_empty(&cm_id_priv->work_list);
	while (!empty) {
		work = list_entry(cm_id_priv->work_list.next,
				  struct iwcm_work, list);
		list_del_init(&work->list);
		empty = list_empty(&cm_id_priv->work_list);
		levent = work->event;
		put_work(work);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		ret = process_event(cm_id_priv, &levent);
		if (ret) {
			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
			destroy_cm_id(&cm_id_priv->id);
		}
		BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
		destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
		if (iwcm_deref_id(cm_id_priv)) {
			if (destroy_id) {
				BUG_ON(!list_empty(&cm_id_priv->work_list));
				free_cm_id(cm_id_priv);
			}
			return;
		}
		spin_lock_irqsave(&cm_id_priv->lock, flags);
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * This function is called in interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block.  Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *	      0	- the event was handled.
 *	-ENOMEM	- the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
			     struct iw_cm_event *iw_event)
{
	struct iwcm_work *work;
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&work->work, cm_work_handler);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(&work->event);
		if (ret) {
			put_work(work);
			goto out;
		}
	}

	atomic_inc(&cm_id_priv->refcount);
	if (list_empty(&cm_id_priv->work_list)) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		queue_work(iwcm_wq, &work->work);
	} else
		list_add_tail(&work->list, &cm_id_priv->work_list);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

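/*
 * Report the QP attributes needed for the INIT/RTR transitions of an
 * iWARP QP: the QP state plus remote read/write access flags.  Valid
 * in the IDLE, CONN_SENT, CONN_RECV and ESTABLISHED states; otherwise
 * -EINVAL.
 */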
static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE|
					   IB_ACCESS_REMOTE_READ;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

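/*
 * Report the QP attributes needed for the RTS transition.  No
 * attribute bits are required here (*qp_attr_mask is set to 0);
 * valid in the same states as the INIT/RTR case above.
 */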
static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = 0;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
	case IB_QPS_RTR:
		ret = iwcm_init_qp_init_attr(cm_id_priv,
					     qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = iwcm_init_qp_rts_attr(cm_id_priv,
					    qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);

static int __init iw_cm_init(void)
{
	iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
	if (!iwcm_wq)
		return -ENOMEM;

	return 0;
}

static void __exit iw_cm_cleanup(void)
{
	destroy_workqueue(iwcm_wq);
}

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);