/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct kmem_cache *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static spinlock_t ib_mad_port_list_lock;

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Caller must hold ib_mad_port_list_lock.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(struct ib_mad *mad)
{
	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
		(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
		 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
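
/*
 * Example (illustrative, not part of this file): a GetResp() MAD
 * carries method 0x81, i.e. IB_MGMT_METHOD_GET | IB_MGMT_METHOD_RESP,
 * so the R-bit test above catches it.  TrapRepress() and BM responses
 * do not set the R bit, which is why they are special-cased by the
 * remaining terms.
 */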

static void timeout_callback(unsigned long data)
{
	struct ib_mad_agent_private *mad_agent_priv =
		(struct ib_mad_agent_private *) data;

	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->timeout_work);
}

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1)
		goto error1;

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
		goto error1;

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
			goto error1;
		if (!recv_handler)
			goto error1;
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				goto error1;
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui))
				goto error1;
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version)
				goto error1;
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
		/* Make a copy of the MAD registration request */
		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_WORK(&mad_agent_priv->timeout_work, timeout_sends);
	setup_timer(&mad_agent_priv->timeout_timer, timeout_callback,
		    (unsigned long) mad_agent_priv);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure the MAD registration request (if supplied) does not
	 * overlap with any existing registrations
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
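
/*
 * Usage sketch (illustrative only; the handler and variable names are
 * invented for this example): a GSI client registering to receive
 * Get() MADs of one class, and tearing down when done:
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class = IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version = 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0, my_send_handler,
 *				      my_recv_handler, my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *	...
 *	ib_unregister_mad_agent(agent);
 */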

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof(mad_snoop_priv) *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
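
/*
 * Sketch (names invented for illustration): passively observing all
 * receive completions on the GSI QP.  Only a recv_handler is required
 * when just IB_MAD_SNOOP_RECVS is requested, so snoop_handler may be
 * NULL here:
 *
 *	snoop_agent = ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
 *					    IB_MAD_SNOOP_RECVS, NULL,
 *					    my_snoop_recv_handler, NULL);
 */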

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	del_timer_sync(&mad_agent_priv->timeout_timer);
	cancel_work_sync(&mad_agent_priv->timeout_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp,
			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		port_num = send_wr->wr.ud.port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) !=
	     IB_LID_PERMISSIVE)
		goto out;
	if (smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
	     IB_SMI_DISCARD) {
		ret = -EINVAL;
		printk(KERN_ERR PFX "Invalid directed route\n");
		goto out;
	}

	/* Check to post send on QP or process locally */
	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kmem_cache_free(ib_mad_cache, mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}
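
/*
 * Worked example (values from ib_mad.h; illustrative only): for an SA
 * MAD, hdr_len = IB_MGMT_SA_HDR (56), so each segment carries
 * sizeof(struct ib_mad) - 56 = 200 bytes of data.  A data_len of 300
 * then needs pad = 200 - (300 % 200) = 100 bytes to fill out the final
 * segment.
 */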

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			printk(KERN_ERR "alloc_send_rmpp_list: RMPP mem "
			       "alloc failed for len %zu, gfp %#x\n",
			       sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}
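
/*
 * Continuing the worked example above (illustrative only): 300 data
 * bytes plus 100 pad bytes in 200-byte segments gives seg_count = 2,
 * and the final 100 bytes of the last segment are the zeroed padding.
 */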

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
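
/*
 * Usage sketch (illustrative; "agent", "ah", "remote_qpn" and
 * "pkey_index" are assumed to exist in the caller): allocate a
 * single-MAD buffer, fill it in, and hand it to ib_post_send_mad():
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR,
 *				 sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
 *				 GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;
 *	msg->timeout_ms = 1000;
 *	msg->retries = 3;
 *	... fill in msg->mad ...
 *	if (ib_post_send_mad(msg, NULL))
 *		ib_free_send_mad(msg);
 */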

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_agent_priv->agent.rmpp_version) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
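
/*
 * Send-completion sketch (hypothetical handler; the name is invented):
 * the send handler registered with ib_register_mad_agent() runs once
 * the work request completes or times out, and typically releases the
 * buffer:
 *
 *	static void my_send_handler(struct ib_mad_agent *mad_agent,
 *				    struct ib_mad_send_wc *mad_send_wc)
 *	{
 *		ib_free_send_mad(mad_send_wc->send_buf);
 *	}
 */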

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kmem_cache_free(ib_mad_cache, priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);
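
/*
 * Receive-side sketch (hypothetical handler name): once a client is
 * done with a received MAD, it must hand the buffers back:
 *
 *	static void my_recv_handler(struct ib_mad_agent *mad_agent,
 *				    struct ib_mad_recv_wc *mad_recv_wc)
 *	{
 *		... examine mad_recv_wc->recv_buf.mad ...
 *		ib_free_recv_mad(mad_recv_wc);
 *	}
 */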

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		if ((*method)->agent[i]) {
			printk(KERN_ERR PFX "Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		printk(KERN_ERR PFX "No memory for "
		       "ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there a matching OUI for this vendor class? */
1277219820Sjeff		if (!memcmp(vendor_class->oui[i], oui, 3))
1278219820Sjeff			return i;
1279219820Sjeff
1280219820Sjeff	return -1;
1281219820Sjeff}
1282219820Sjeff
1283219820Sjeffstatic int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1284219820Sjeff{
1285219820Sjeff	int i;
1286219820Sjeff
1287219820Sjeff	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1288219820Sjeff		if (vendor->vendor_class[i])
1289219820Sjeff			return 1;
1290219820Sjeff
1291219820Sjeff	return 0;
1292219820Sjeff}
1293219820Sjeff
1294219820Sjeffstatic void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1295219820Sjeff				     struct ib_mad_agent_private *agent)
1296219820Sjeff{
1297219820Sjeff	int i;
1298219820Sjeff
1299219820Sjeff	/* Remove any methods for this mad agent */
1300219820Sjeff	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1301219820Sjeff		if (method->agent[i] == agent) {
1302219820Sjeff			method->agent[i] = NULL;
1303219820Sjeff		}
1304219820Sjeff	}
1305219820Sjeff}

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		(*method)->agent[i] = agent_priv;
	}
	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}
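
/*
 * Illustrative sketch (added for clarity, not part of this file): how a
 * client would fill in struct ib_mad_reg_req before the tables above are
 * populated.  The class and method values below are arbitrary examples,
 * not requirements.
 *
 *	struct ib_mad_reg_req reg_req;
 *
 *	memset(&reg_req, 0, sizeof reg_req);
 *	reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
 *	reg_req.mgmt_class_version = 1;
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
 *
 * add_nonoui_reg_req() then indexes version[1].class, allocates the class
 * and method tables on first use, and points each requested method slot
 * at the registering agent.
 */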

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class\n");
			goto error2;
		}

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there a matching OUI for this vendor class? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is this OUI slot available? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	printk(KERN_ERR PFX "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		(*method)->agent[i] = agent_priv;
	}
	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}
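
/*
 * Explanatory note (added): "vendor classes with OUI" are the vendor
 * range 2 management classes, 0x30-0x4f.  vendor_class_index() simply
 * rebases them to 0-0x1f so they can index the per-version vendor table;
 * a mgmt_class of 0x32, for example, lands in vendor_class[2], and each
 * of its MAX_MGMT_OUI slots pairs a 3-byte IEEE OUI with a method table.
 */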

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was a MAD registration request supplied
	 * with the original registration?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}

static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       struct ib_mad *mad)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			if (!class)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad->mad_hdr.mgmt_class)];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			printk(KERN_NOTICE PFX "No receive handler for client "
			       "%p on port %d\n",
			       &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}
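
/*
 * Explanatory note (added): the 64-bit transaction ID is split in two.
 * The MAD layer owns the high 32 bits (each agent gets a unique hi_tid
 * at registration), while the low 32 bits are chosen by the client.
 * A response TID of, say, 0x0000002a00000007 therefore routes to the
 * agent whose hi_tid is 0x2a, regardless of the client's low bits.
 */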

static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
		       struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
				     struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
				   struct ib_mad_send_wr_private *wr,
				   struct ib_mad_recv_wc *rwc)
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;

	send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(rwc->recv_buf.mad);

	if (send_resp == rcv_resp)
		/* both requests, or both responses: treat GIDs as different */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has a GID, the other does not.  Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* this is a request/response pair */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			if (ib_get_cached_gid(device, port_num,
					      attr.grh.sgid_index, &sgid))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	else
		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
			       16);
}
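
/*
 * Explanatory note (added): with LID mask control, a port answers to
 * 2^lmc consecutive LIDs that differ only in their low lmc bits, so the
 * path-bits comparison above masks with (1 << lmc) - 1.  For example,
 * with lmc == 2, src_path_bits 0x5 and dlid_path_bits 0x1 still match
 * because they agree in the low two bits (0x5 ^ 0x1 = 0x4, masked to 0).
 */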

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}

struct ib_mad_send_wr_private*
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
		 struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	struct ib_mad *mad;

	mad = (struct ib_mad *)wc->recv_buf.mad;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad->mad_hdr.tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad->mad_hdr.tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->done_list);
}

static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (mad_agent_priv->agent.rmpp_version) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(mad_recv_wc);
			deref_mad_agent(mad_agent_priv);
			return;
		}
		ib_mark_mad_done(mad_send_wr);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}

static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;
	int port_num;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    sizeof(struct ib_mad_private) -
			      sizeof(struct ib_mad_private_header),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
		goto out;

	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
	if (!response) {
		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
		       "for response buffer\n");
		goto out;
	}

	if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (recv->mad.mad.mad_hdr.mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		enum smi_forward_action retsmi;

		if (smi_handle_dr_smp_recv(&recv->mad.smp,
					   port_priv->device->node_type,
					   port_num,
					   port_priv->device->phys_port_cnt) ==
					   IB_SMI_DISCARD)
			goto out;

		retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
		if (retsmi == IB_SMI_LOCAL)
			goto local;

		if (retsmi == IB_SMI_SEND) { /* don't forward */
			if (smi_handle_dr_smp_send(&recv->mad.smp,
						   port_priv->device->node_type,
						   port_num) == IB_SMI_DISCARD)
				goto out;

			if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
				goto out;
		} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
			/* forward case for switches */
			memcpy(response, recv, sizeof(*response));
			response->header.recv_wc.wc = &response->header.wc;
			response->header.recv_wc.recv_buf.mad = &response->mad.mad;
			response->header.recv_wc.recv_buf.grh = &response->grh;

			agent_send_response(&response->mad.mad,
					    &response->grh, wc,
					    port_priv->device,
					    smi_get_fwd_port(&recv->mad.smp),
					    qp_info->qp->qp_num);

			goto out;
		}
	}

local:
	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		int ret;

		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     &recv->mad.mad,
						     &response->mad.mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response(&response->mad.mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed in ib_mad_complete_recv() on error paths,
		 * or released via the agent's recv_handler otherwise
		 */
		recv = NULL;
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		if (recv)
			kmem_cache_free(ib_mad_cache, recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
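
/*
 * Explanatory note (added): the out: path above keeps exactly one receive
 * buffer posted per completion.  If the scratch "response" buffer was
 * allocated, it is recycled as the replacement receive and the original
 * "recv" is freed unless an agent took ownership of it; if no "response"
 * exists, "recv" itself is reposted.  Every ib_mad_private thus ends up
 * with an agent, back on the receive queue, or on the free list.
 */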

static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	if (list_empty(&mad_agent_priv->wait_list)) {
		del_timer(&mad_agent_priv->timeout_timer);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			mod_timer(&mad_agent_priv->timeout_timer,
				  mad_send_wr->timeout);
		}
	}
}

static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Re-arm the timer if this send now has the earliest timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_timer(&mad_agent_priv->timeout_timer,
			  mad_send_wr->timeout);
}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
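
/*
 * Explanatory note (added): wait_for_response() keeps wait_list sorted by
 * absolute expiry (in jiffies), scanning from the tail because new
 * timeouts usually expire last.  Since the list head is always the
 * earliest deadline, one timeout_timer per agent suffices; it only needs
 * re-arming when the head changes.
 */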

/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private	*mad_agent_priv;
	unsigned long			flags;
	int				ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (mad_agent_priv->agent.rmpp_version) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
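
/*
 * Explanatory note (added): mad_send_wr->refcount counts the outstanding
 * events for a work request: one for the pending send completion, plus
 * one while a response is still expected (timeout != 0).  Dropping from
 * 2 to 1 above means the send completed but the response is outstanding,
 * so the request moves to the wait list; reaching 0 completes it back to
 * the client.
 */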

static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private	*mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head		*mad_list;
	struct ib_mad_qp_info		*qp_info;
	struct ib_mad_queue		*send_queue;
	struct ib_send_wr		*bad_send_wr;
	struct ib_mad_send_wc		mad_send_wc;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
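
/*
 * Explanatory note (added): sends beyond the QP's max_active depth park
 * on qp_info->overflow_list rather than being posted.  Each send
 * completion above frees one hardware slot, so the oldest overflow entry
 * is moved to the real send queue and posted; if that post fails, the
 * handler loops back via retry: and completes the entry with
 * IB_WC_LOC_QP_OP_ERR.
 */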

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					&bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				printk(KERN_ERR PFX "mad_error_handler - "
				       "ib_modify_qp to RTS : %d\n", ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}

/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(struct work_struct *work)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = container_of(work, struct ib_mad_port_private, work);
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}

static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}

static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
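
/*
 * Illustrative sketch (added, not part of this file): a client that no
 * longer wants a reply can shorten or cancel an outstanding send.  The
 * "agent" and "send_buf" names below stand for whatever the client
 * obtained from ib_register_mad_agent() and ib_create_send_mad().
 *
 *	ib_modify_mad(agent, send_buf, 50);	// wait at most 50 ms more
 *	...
 *	ib_cancel_mad(agent, send_buf);		// give up entirely
 *
 * Cancelling is just ib_modify_mad() with a zero timeout: the request is
 * completed back to the client with status IB_WC_WR_FLUSH_ERR.
 */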

static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     (unsigned long) local->mad_send_wr,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     0, recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						&local->mad_priv->mad.mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					  &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kmem_cache_free(ib_mad_cache, local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
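
/*
 * Explanatory note (added): retries_left counts down toward -ETIMEDOUT
 * while send_buf.retries counts up, so the client can observe how many
 * resends were actually attempted.  A successful resend re-takes a
 * refcount and moves the request back onto the active send list.
 */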

static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timeout_work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			mod_timer(&mad_agent_priv->timeout_timer,
				  mad_send_wr->timeout);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	if (!list_empty(&port_priv->port_list))
		queue_work(port_priv->wq, &port_priv->work);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 sizeof *mad_priv -
						   sizeof mad_priv->header,
						 DMA_FROM_DEVICE);
		/* Don't post a receive pointing at a failed DMA mapping */
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			kmem_cache_free(ib_mad_cache, mad_priv);
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    sizeof *mad_priv -
					      sizeof mad_priv->header,
					    DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
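
/*
 * Explanatory note (added): the do/while (post) loop above serves two
 * callers.  At startup (mad == NULL) it keeps allocating and posting
 * until the receive queue reaches max_active; from the receive path it
 * recycles a single buffer, and "post" goes false after one iteration
 * because ++count lands back at max_active.
 */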

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    sizeof(struct ib_mad_private) -
				      sizeof(struct ib_mad_private_header),
				    DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}
2639219820Sjeff
2640219820Sjeff/*
2641219820Sjeff * Start the port
2642219820Sjeff */
2643219820Sjeffstatic int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2644219820Sjeff{
2645219820Sjeff	int ret, i;
2646219820Sjeff	struct ib_qp_attr *attr;
2647219820Sjeff	struct ib_qp *qp;
2648219820Sjeff
2649219820Sjeff	attr = kmalloc(sizeof *attr, GFP_KERNEL);
2650219820Sjeff	if (!attr) {
2651219820Sjeff		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2652219820Sjeff		return -ENOMEM;
2653219820Sjeff	}
2654219820Sjeff
2655219820Sjeff	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2656219820Sjeff		qp = port_priv->qp_info[i].qp;
2657219820Sjeff		if (!qp)
2658219820Sjeff			continue;
2659219820Sjeff
2660219820Sjeff		/*
2661219820Sjeff		 * PKey index for QP1 is irrelevant but
2662219820Sjeff		 * one is needed for the Reset to Init transition
2663219820Sjeff		 */
2664219820Sjeff		attr->qp_state = IB_QPS_INIT;
2665219820Sjeff		attr->pkey_index = 0;
2666219820Sjeff		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2667219820Sjeff		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2668219820Sjeff					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
2669219820Sjeff		if (ret) {
2670219820Sjeff			printk(KERN_ERR PFX "Couldn't change QP%d state to "
2671219820Sjeff			       "INIT: %d\n", i, ret);
2672219820Sjeff			goto out;
2673219820Sjeff		}
2674219820Sjeff
2675219820Sjeff		attr->qp_state = IB_QPS_RTR;
2676219820Sjeff		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2677219820Sjeff		if (ret) {
2678219820Sjeff			printk(KERN_ERR PFX "Couldn't change QP%d state to "
2679219820Sjeff			       "RTR: %d\n", i, ret);
2680219820Sjeff			goto out;
2681219820Sjeff		}
2682219820Sjeff
2683219820Sjeff		attr->qp_state = IB_QPS_RTS;
2684219820Sjeff		attr->sq_psn = IB_MAD_SEND_Q_PSN;
2685219820Sjeff		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2686219820Sjeff		if (ret) {
2687219820Sjeff			printk(KERN_ERR PFX "Couldn't change QP%d state to "
2688219820Sjeff			       "RTS: %d\n", i, ret);
2689219820Sjeff			goto out;
2690219820Sjeff		}
2691219820Sjeff	}
2692219820Sjeff
2693219820Sjeff	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2694219820Sjeff	if (ret) {
2695219820Sjeff		printk(KERN_ERR PFX "Failed to request completion "
2696219820Sjeff		       "notification: %d\n", ret);
2697219820Sjeff		goto out;
2698219820Sjeff	}
2699219820Sjeff
2700219820Sjeff	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2701219820Sjeff		if (!port_priv->qp_info[i].qp)
2702219820Sjeff			continue;
2703219820Sjeff
2704219820Sjeff		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2705219820Sjeff		if (ret) {
2706219820Sjeff			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
2707219820Sjeff			goto out;
2708219820Sjeff		}
2709219820Sjeff	}
2710219820Sjeffout:
2711219820Sjeff	kfree(attr);
2712219820Sjeff	return ret;
2713219820Sjeff}
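
/*
 * For comparison, a minimal sketch of the same Reset->Init->RTR->RTS
 * ladder for an ordinary UD QP.  The function and attribute values
 * below are illustrative assumptions, not used anywhere in this driver;
 * the point is that a regular UD QP must also name its port via
 * IB_QP_PORT at Init time, which the special QPs above get implicitly.
 */
#if 0	/* illustrative only */
static int example_ud_qp_to_rts(struct ib_qp *qp, u8 port_num)
{
	struct ib_qp_attr attr;
	int ret;

	memset(&attr, 0, sizeof attr);
	attr.qp_state	= IB_QPS_INIT;
	attr.pkey_index	= 0;			/* assumed P_Key slot */
	attr.port_num	= port_num;
	attr.qkey	= IB_QP1_QKEY;		/* example Q_Key */
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				      IB_QP_PORT | IB_QP_QKEY);
	if (ret)
		return ret;

	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret)
		return ret;

	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn   = 0;			/* arbitrary initial send PSN */
	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
}
#endif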

static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info	*qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr	qp_init_attr;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		return PTR_ERR(qp_info->qp);
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the CQ, PD, MR, and QPs (the SMI QP only when the port's
 * link layer is InfiniBand)
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
	if (has_smi)
		cq_size *= 2;

	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size, 0);
	if (IS_ERR(port_priv->cq)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler);

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (!port_priv) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

static void ib_mad_init_device(struct ib_device *device)
{
	int start, end, i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		start = 0;
		end   = 0;
	} else {
		start = 1;
		end   = device->phys_port_cnt;
	}

	for (i = start; i <= end; i++) {
		if (ib_mad_port_open(device, i)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
			       device->name, i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d "
			       "for agents\n",
			       device->name, i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		printk(KERN_ERR PFX "Couldn't close %s port %d\n",
		       device->name, i);

error:
	i--;

	while (i >= start) {
		if (ib_agent_port_close(device, i))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, i);
		if (ib_mad_port_close(device, i))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, i);
		i--;
	}
}

static void ib_mad_remove_device(struct ib_device *device)
{
	int i, num_ports, cur_port;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_agent_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
		if (ib_mad_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
	}
}

static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
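
/*
 * A minimal sketch (hypothetical names) of how any other kernel ULP
 * hooks the same ib_client interface: provide add/remove callbacks and
 * register once at module load, exactly as ib_mad_init_module() does
 * below for mad_client.
 */
#if 0	/* illustrative only */
static void example_add_device(struct ib_device *device)
{
	printk(KERN_INFO "example: device %s added\n", device->name);
}

static void example_remove_device(struct ib_device *device)
{
	printk(KERN_INFO "example: device %s removed\n", device->name);
}

static struct ib_client example_client = {
	.name	= "example",
	.add	= example_add_device,
	.remove	= example_remove_device,
};
/* pair ib_register_client(&example_client) at module init with
 * ib_unregister_client(&example_client) at module exit */
#endif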

static int __init ib_mad_init_module(void)
{
	int ret;

	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	spin_lock_init(&ib_mad_port_list_lock);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!ib_mad_cache) {
		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}

static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);
	kmem_cache_destroy(ib_mad_cache);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);