1/*
2
3 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
4 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
6 * Copyright (c) 2009 HNR Consulting. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses.  You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 *     Redistribution and use in source and binary forms, with or
15 *     without modification, are permitted provided that the following
16 *     conditions are met:
17 *
18 *      - Redistributions of source code must retain the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer.
21 *
22 *      - Redistributions in binary form must reproduce the above
23 *        copyright notice, this list of conditions and the following
24 *        disclaimer in the documentation and/or other materials
25 *        provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
 *
 */

/*
 #include <linux/dma-mapping.h>
 #include <rdma/ib_cache.h>
 */
40#include <linux/gfp.h>
41#include <linux/workqueue.h>
42#include <linux/bitops.h>
43
44#include <rdma/ib_cache.h>
45
46#include "mad_priv.h"
47#include "mad_rmpp.h"
51#include "smi.h"
52#include "agent.h"
53/*
54 MODULE_LICENSE("Dual BSD/GPL");
55 MODULE_DESCRIPTION("kernel IB MAD API");
56 MODULE_AUTHOR("Hal Rosenstock");
57 MODULE_AUTHOR("Sean Hefty");
58 */
59int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
60int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
61/*
62 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
63 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
64 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
65 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
66
67 static struct kmem_cache *ib_mad_cache;
68 */
69static struct list_head ib_mad_port_list;
70
71static u32 ib_mad_client_id = 0;
/*
 * Port list lock:
 * static spinlock_t ib_mad_port_list_lock;
 */

/* Forward declarations */
78static int method_in_use(struct ib_mad_mgmt_method_table **method,
79		struct ib_mad_reg_req *mad_reg_req);
80/*
81 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
82 */
83static struct ib_mad_agent_private *find_mad_agent(
84		struct ib_mad_port_private *port_priv, struct ib_mad *mad);
85/*
86 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
87 struct ib_mad_private *mad);
88 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
89 static void timeout_sends(struct work_struct *work);
90 static void local_completions(struct work_struct *work);
91
92 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
93 struct ib_mad_agent_private *agent_priv, u8 mgmt_class);
94
95 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
96 struct ib_mad_agent_private *agent_priv);
97

 * Returns an ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is held
 */
102static inline struct ib_mad_port_private *
103__ib_get_mad_port(struct ib_device *device, int port_num) {
104	struct ib_mad_port_private *entry;
105
106	list_for_each_entry(entry, &ib_mad_port_list, port_list)
107	{
108		if (entry->device == device && entry->port_num == port_num)
109			return entry;
110	}
111	return NULL;
112}
/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port
 */
118static inline struct ib_mad_port_private *
119ib_get_mad_port(struct ib_device *device, int port_num) {
120	struct ib_mad_port_private *entry;
121	/*unsigned long flags;*/
122
123	/*spin_lock_irqsave(&ib_mad_port_list_lock, flags);*/
124	entry = __ib_get_mad_port(device, port_num);
125	/*spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);*/
126
127	return entry;
128}
129
130static inline u8 convert_mgmt_class(u8 mgmt_class) {
131	/*Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0*/
132	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ? 0 : mgmt_class;
133}
134
135static int get_spl_qp_index(enum ib_qp_type qp_type) {
136	switch (qp_type) {
137	case IB_QPT_SMI:
138		return 0;
139	case IB_QPT_GSI:
140		return 1;
141	default:
142		return -1;
143	}
144}
145
146static int vendor_class_index(u8 mgmt_class) {
147	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
148}
149
150static int is_vendor_class(u8 mgmt_class) {
151	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START)
152			|| (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
153		return 0;
154	return 1;
155}
156
157static inline int is_vendor_oui(u8 *oui) {
158	if (oui[0] || oui[1] || oui[2])
159		return 1;
160	return 0;
161}
162
163static inline int is_vendor_method_in_use(
164		struct ib_mad_mgmt_vendor_class *vendor_class,
165		struct ib_mad_reg_req *mad_reg_req) {
166	struct ib_mad_mgmt_method_table *method;
167	int i;
168
169	for (i = 0; i < MAX_MGMT_OUI; i++) {
170		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
171			method = vendor_class->method_table[i];
172			if (method) {
173				if (method_in_use(&method, mad_reg_req))
174					return 1;
175				else
176					break;
177			}
178		}
179	}
180	return 0;
181}
182
183int ib_response_mad(struct ib_mad *mad) {
184	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP)
185			|| (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
186			|| ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM)
187					&& (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
188}
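
/*
 * Illustrative sketch (not part of this port): ib_response_mad() lets a
 * dispatcher separate solicited responses from unsolicited requests.
 * The handler names below are hypothetical.
 *
 * static void dispatch_mad(struct ib_mad *mad)
 * {
 *	if (ib_response_mad(mad))
 *		handle_response(mad);	// R bit set, TrapRepress, or BM response
 *	else
 *		handle_request(mad);	// e.g. Get, Set, Trap
 * }
 */
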
189/*
190 EXPORT_SYMBOL(ib_response_mad);
191
192 static void timeout_callback(unsigned long data)
193 {
194 struct ib_mad_agent_private *mad_agent_priv =
195 (struct ib_mad_agent_private *) data;
196
197 queue_work(mad_agent_priv->qp_info->port_priv->wq,
198 &mad_agent_priv->timeout_work);
199 }
200
201
202 * ib_register_mad_agent - Register to send/receive MADs
203 */
204struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
205		u8 port_num, enum ib_qp_type qp_type,
206		struct ib_mad_reg_req *mad_reg_req, u8 rmpp_version,
207		ib_mad_send_handler send_handler, ib_mad_recv_handler recv_handler,
208		void *context) {
209	struct ib_mad_port_private *port_priv;
210	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
211	struct ib_mad_agent_private *mad_agent_priv;
212	struct ib_mad_reg_req *reg_req = NULL;
213	/*struct ib_mad_mgmt_class_table *class;
214	 struct ib_mad_mgmt_vendor_class_table *vendor;
215	 struct ib_mad_mgmt_vendor_class *vendor_class;
216	 struct ib_mad_mgmt_method_table *method;*/
217	int /*ret2,*/qpn;
218	/*unsigned long flags;
219	 u8 mgmt_class, vclass;*/
220
221	/*Validate parameters*/
222	qpn = get_spl_qp_index(qp_type);
223	if (qpn == -1)
224		goto error1;
225
226	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
227		goto error1;
228
229	/*Validate MAD registration request if supplied*/
230	if (mad_reg_req) {
231		assert(!"NYI");
232		/*if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
233		 goto error1;
234		 if (!recv_handler)
235		 goto error1;
236		 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
237
238		 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
239		 * one in this range currently allowed
240
241		 if (mad_reg_req->mgmt_class !=
242		 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
243		 goto error1;
244		 } else if (mad_reg_req->mgmt_class == 0) {
245
246		 * Class 0 is reserved in IBA and is used for
247		 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
248
249		 goto error1;
250		 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
251
252		 * If class is in "new" vendor range,
253		 * ensure supplied OUI is not zero
254
255		 if (!is_vendor_oui(mad_reg_req->oui))
256		 goto error1;
257		 }
258		 Make sure class supplied is consistent with RMPP
259		 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
260		 if (rmpp_version)
261		 goto error1;
262		 }
263		 Make sure class supplied is consistent with QP type
264		 if (qp_type == IB_QPT_SMI) {
265		 if ((mad_reg_req->mgmt_class !=
266		 IB_MGMT_CLASS_SUBN_LID_ROUTED) && (mad_reg_req->mgmt_class !=
267		 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
268		 goto error1;
269		 } else {
270		 if ((mad_reg_req->mgmt_class ==
271		 IB_MGMT_CLASS_SUBN_LID_ROUTED) || (mad_reg_req->mgmt_class ==
272		 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
273		 goto error1;
274		 }*/
275	} else {
276		/*No registration request supplied*/
277		if (!send_handler)
278			goto error1;
279	}
280
281	/*Validate device and port*/
282	port_priv = ib_get_mad_port(device, port_num);
283	if (!port_priv) {
284		ret = ERR_PTR(-ENODEV);
285		goto error1;
286	}
287
288	/*Allocate structures*/
289	mad_agent_priv = calloc(1, sizeof *mad_agent_priv);
290	if (!mad_agent_priv) {
291		ret = ERR_PTR(-ENOMEM);
292		goto error1;
293	}
294
295	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
296			IB_ACCESS_LOCAL_WRITE);
297	if (IS_ERR(mad_agent_priv->agent.mr)) {
298		ret = ERR_PTR(-ENOMEM);
299		goto error2;
300	}
301
302	if (mad_reg_req) {
303		assert(!"NYI");
304		/*reg_req = malloc(sizeof *reg_req);
305		 if (!reg_req) {
306		 ret = ERR_PTR(-ENOMEM);
307		 goto error3;
308		 }
309		 Make a copy of the MAD registration request
310		 memcpy(reg_req, mad_reg_req, sizeof *reg_req);*/
311	}
312
313	/*Now, fill in the various structures*/
314	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
315	mad_agent_priv->reg_req = reg_req;
316	mad_agent_priv->agent.rmpp_version = rmpp_version;
317	mad_agent_priv->agent.device = device;
318	mad_agent_priv->agent.recv_handler = recv_handler;
319	mad_agent_priv->agent.send_handler = send_handler;
320	mad_agent_priv->agent.context = context;
321	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
322	mad_agent_priv->agent.port_num = port_num;
323	/*spin_lock_init(&mad_agent_priv->lock);*/
324	INIT_LIST_HEAD(&mad_agent_priv->send_list);
325	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
326	INIT_LIST_HEAD(&mad_agent_priv->done_list);
327	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
328	/*INIT_WORK(&mad_agent_priv->timeout_work, timeout_sends);
329	 setup_timer(&mad_agent_priv->timeout_timer, timeout_callback,
330	 (unsigned long) mad_agent_priv);
331	 INIT_LIST_HEAD(&mad_agent_priv->local_list);
332	 INIT_WORK(&mad_agent_priv->local_work, local_completions);
333	 atomic_set(&mad_agent_priv->refcount, 1);
334	 init_completion(&mad_agent_priv->comp);
335
336	 spin_lock_irqsave(&port_priv->reg_lock, flags);*/
337	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
338
	/* Make sure MAD registration (if supplied) does not overlap
	 * with any existing registrations. */
341
342	if (mad_reg_req) {
343		assert(!"NYI");
344		/*mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
345		 if (!is_vendor_class(mgmt_class)) {
346		 class = port_priv->version[mad_reg_req->mgmt_class_version].class;
347		 if (class) {
348		 method = class->method_table[mgmt_class];
349		 if (method) {
350		 if (method_in_use(&method, mad_reg_req))
351		 goto error4;
352		 }
353		 }
354		 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, mgmt_class);
355		 } else {
356		 "New" vendor class range
357		 vendor = port_priv->version[mad_reg_req->mgmt_class_version].vendor;
358		 if (vendor) {
359		 vclass = vendor_class_index(mgmt_class);
360		 vendor_class = vendor->vendor_class[vclass];
361		 if (vendor_class) {
362		 if (is_vendor_method_in_use(vendor_class, mad_reg_req))
363		 goto error4;
364		 }
365		 }
366		 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
367		 }
368		 if (ret2) {
369		 ret = ERR_PTR(ret2);
370		 goto error4;
371		 }*/
372	}
373
374	/*Add mad agent into port's agent list*/
375	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
376	/*spin_unlock_irqrestore(&port_priv->reg_lock, flags);*/
377
378	return &mad_agent_priv->agent;
379
380	/*TODO: cleanup*/
381	/*error4: spin_unlock_irqrestore(&port_priv->reg_lock, flags);
382	 free(reg_req);
383	 error3: ib_dereg_mr(mad_agent_priv->agent.mr);*/
384	error2: free(mad_agent_priv);
385	error1: return ret;
386}
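
/*
 * Illustrative usage sketch (not part of this port): registering a GSI
 * agent without a registration request, the only path this port handles
 * so far (mad_reg_req processing is still marked NYI above).  The
 * my_send_done/my_recv_done callbacks and my_ctx are hypothetical and
 * assumed to match the ib_mad_send_handler/ib_mad_recv_handler typedefs;
 * device and port_num are assumed to be in scope.
 *
 * struct ib_mad_agent *agent;
 *
 * agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, NULL, 0,
 *		my_send_done, my_recv_done, my_ctx);
 * if (IS_ERR(agent))
 *	return;		// registration failed
 */
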
387/*
388 EXPORT_SYMBOL(ib_register_mad_agent);
389
390 static inline int is_snooping_sends(int mad_snoop_flags)
391 {
392 return (mad_snoop_flags &
393 (IB_MAD_SNOOP_POSTED_SENDS |
394 IB_MAD_SNOOP_RMPP_SENDS |
395 IB_MAD_SNOOP_SEND_COMPLETIONS |
396 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS));
397 }
398
399 static inline int is_snooping_recvs(int mad_snoop_flags)
400 {
401 return (mad_snoop_flags &
402 (IB_MAD_SNOOP_RECVS |
403 IB_MAD_SNOOP_RMPP_RECVS));
404 }
405
406 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
407 struct ib_mad_snoop_private *mad_snoop_priv)
408 {
409 struct ib_mad_snoop_private **new_snoop_table;
410 unsigned long flags;
411 int i;
412
413 spin_lock_irqsave(&qp_info->snoop_lock, flags);
414 Check for empty slot in array.
415 for (i = 0; i < qp_info->snoop_table_size; i++)
416 if (!qp_info->snoop_table[i])
417 break;
418
419 if (i == qp_info->snoop_table_size) {
420 Grow table.
421 new_snoop_table = krealloc(qp_info->snoop_table,
422 sizeof mad_snoop_priv *
423 (qp_info->snoop_table_size + 1),
424 GFP_ATOMIC);
425 if (!new_snoop_table) {
426 i = -ENOMEM;
427 goto out;
428 }
429
430 qp_info->snoop_table = new_snoop_table;
431 qp_info->snoop_table_size++;
432 }
433 qp_info->snoop_table[i] = mad_snoop_priv;
434 atomic_inc(&qp_info->snoop_count);
435 out:
436 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
437 return i;
438 }
439
440 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
441 u8 port_num,
442 enum ib_qp_type qp_type,
443 int mad_snoop_flags,
444 ib_mad_snoop_handler snoop_handler,
445 ib_mad_recv_handler recv_handler,
446 void *context)
447 {
448 struct ib_mad_port_private *port_priv;
449 struct ib_mad_agent *ret;
450 struct ib_mad_snoop_private *mad_snoop_priv;
451 int qpn;
452
453 Validate parameters
454 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
455 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
456 ret = ERR_PTR(-EINVAL);
457 goto error1;
458 }
459 qpn = get_spl_qp_index(qp_type);
460 if (qpn == -1) {
461 ret = ERR_PTR(-EINVAL);
462 goto error1;
463 }
464 port_priv = ib_get_mad_port(device, port_num);
465 if (!port_priv) {
466 ret = ERR_PTR(-ENODEV);
467 goto error1;
468 }
469 Allocate structures
470 mad_snoop_priv = calloc(1,sizeof *mad_snoop_priv);
471 if (!mad_snoop_priv) {
472 ret = ERR_PTR(-ENOMEM);
473 goto error1;
474 }
475
476 Now, fill in the various structures
477 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
478 mad_snoop_priv->agent.device = device;
479 mad_snoop_priv->agent.recv_handler = recv_handler;
480 mad_snoop_priv->agent.snoop_handler = snoop_handler;
481 mad_snoop_priv->agent.context = context;
482 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
483 mad_snoop_priv->agent.port_num = port_num;
484 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
485 init_completion(&mad_snoop_priv->comp);
486 mad_snoop_priv->snoop_index = register_snoop_agent(
487 &port_priv->qp_info[qpn],
488 mad_snoop_priv);
489 if (mad_snoop_priv->snoop_index < 0) {
490 ret = ERR_PTR(mad_snoop_priv->snoop_index);
491 goto error2;
492 }
493
494 atomic_set(&mad_snoop_priv->refcount, 1);
495 return &mad_snoop_priv->agent;
496
497 error2:
498 free(mad_snoop_priv);
499 error1:
500 return ret;
501 }
502 EXPORT_SYMBOL(ib_register_mad_snoop);
503 */
504static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv) {
505	/*if (atomic_dec_and_test(&mad_agent_priv->refcount))
506	 complete(&mad_agent_priv->comp);*/
507}
508/*
509 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
510 {
511 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
512 complete(&mad_snoop_priv->comp);
513 }
514
515 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
516 {
517 struct ib_mad_port_private *port_priv;
518 unsigned long flags;
519
520 Note that we could still be handling received MADs
521
522
523 * Canceling all sends results in dropping received response
524 * MADs, preventing us from queuing additional work
525
526 cancel_mads(mad_agent_priv);
527 port_priv = mad_agent_priv->qp_info->port_priv;
528 del_timer_sync(&mad_agent_priv->timeout_timer);
529 cancel_work_sync(&mad_agent_priv->timeout_work);
530
531 spin_lock_irqsave(&port_priv->reg_lock, flags);
532 remove_mad_reg_req(mad_agent_priv);
533 list_del(&mad_agent_priv->agent_list);
534 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
535
536 flush_workqueue(port_priv->wq);
537 ib_cancel_rmpp_recvs(mad_agent_priv);
538
539 deref_mad_agent(mad_agent_priv);
540 wait_for_completion(&mad_agent_priv->comp);
541
542 free(mad_agent_priv->reg_req);
543 ib_dereg_mr(mad_agent_priv->agent.mr);
544 free(mad_agent_priv);
545 }
546
547 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
548 {
549 struct ib_mad_qp_info *qp_info;
550 unsigned long flags;
551
552 qp_info = mad_snoop_priv->qp_info;
553 spin_lock_irqsave(&qp_info->snoop_lock, flags);
554 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
555 atomic_dec(&qp_info->snoop_count);
556 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
557
558 deref_snoop_agent(mad_snoop_priv);
559 wait_for_completion(&mad_snoop_priv->comp);
560
561 free(mad_snoop_priv);
562 }
563
564
565 * ib_unregister_mad_agent - Unregisters a client from using MAD services
566
567 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
568 {
569 struct ib_mad_agent_private *mad_agent_priv;
570 struct ib_mad_snoop_private *mad_snoop_priv;
571
572 If the TID is zero, the agent can only snoop.
573 if (mad_agent->hi_tid) {
574 mad_agent_priv = container_of(mad_agent,
575 struct ib_mad_agent_private,
576 agent);
577 unregister_mad_agent(mad_agent_priv);
578 } else {
579 mad_snoop_priv = container_of(mad_agent,
580 struct ib_mad_snoop_private,
581 agent);
582 unregister_mad_snoop(mad_snoop_priv);
583 }
584 return 0;
585 }
586 EXPORT_SYMBOL(ib_unregister_mad_agent);
587 */
static void dequeue_mad(struct ib_mad_list_head *mad_list) {
	struct ib_mad_queue *mad_queue;
	/*unsigned long flags;*/

	assert(mad_list->mad_queue != NULL);
	mad_queue = mad_list->mad_queue;
	/*spin_lock_irqsave(&mad_queue->lock, flags);*/
	list_del(&mad_list->list);
	mad_queue->count--;
	/*spin_unlock_irqrestore(&mad_queue->lock, flags);*/
}
599/*
600 static void snoop_send(struct ib_mad_qp_info *qp_info,
601 struct ib_mad_send_buf *send_buf,
602 struct ib_mad_send_wc *mad_send_wc,
603 int mad_snoop_flags)
604 {
605 struct ib_mad_snoop_private *mad_snoop_priv;
606 unsigned long flags;
607 int i;
608
609 spin_lock_irqsave(&qp_info->snoop_lock, flags);
610 for (i = 0; i < qp_info->snoop_table_size; i++) {
611 mad_snoop_priv = qp_info->snoop_table[i];
612 if (!mad_snoop_priv ||
613 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
614 continue;
615
616 atomic_inc(&mad_snoop_priv->refcount);
617 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
618 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
619 send_buf, mad_send_wc);
620 deref_snoop_agent(mad_snoop_priv);
621 spin_lock_irqsave(&qp_info->snoop_lock, flags);
622 }
623 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
624 }
625
626 static void snoop_recv(struct ib_mad_qp_info *qp_info,
627 struct ib_mad_recv_wc *mad_recv_wc,
628 int mad_snoop_flags)
629 {
630 struct ib_mad_snoop_private *mad_snoop_priv;
631 unsigned long flags;
632 int i;
633
634 spin_lock_irqsave(&qp_info->snoop_lock, flags);
635 for (i = 0; i < qp_info->snoop_table_size; i++) {
636 mad_snoop_priv = qp_info->snoop_table[i];
637 if (!mad_snoop_priv ||
638 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
639 continue;
640
641 atomic_inc(&mad_snoop_priv->refcount);
642 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
643 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
644 mad_recv_wc);
645 deref_snoop_agent(mad_snoop_priv);
646 spin_lock_irqsave(&qp_info->snoop_lock, flags);
647 }
648 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
649 }
650 */
651static void build_smp_wc(struct ib_qp *qp, u64 wr_id, u16 slid, u16 pkey_index,
652		u8 port_num, struct ib_wc *wc) {
653	memset(wc, 0, sizeof *wc);
654	wc->wr_id = wr_id;
655	wc->status = IB_WC_SUCCESS;
656	wc->opcode = IB_WC_RECV;
657	wc->pkey_index = pkey_index;
658	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
659	wc->src_qp = IB_QP0;
660	wc->qp = qp;
661	wc->slid = slid;
662	wc->sl = 0;
663	wc->dlid_path_bits = 0;
664	wc->port_num = port_num;
665}
666/*
667
668 * Return 0 if SMP is to be sent
669 * Return 1 if SMP was consumed locally (whether or not solicited)
670 * Return < 0 if error
671 */
672static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
673		struct ib_mad_send_wr_private *mad_send_wr) {
674	int ret = 0;
675	struct ib_smp *smp = mad_send_wr->send_buf.mad;
676	/*unsigned long flags;*/
677	struct ib_mad_local_private *local;
678	struct ib_mad_private *mad_priv;
679	struct ib_mad_port_private *port_priv;
680	struct ib_mad_agent_private *recv_mad_agent = NULL;
681	struct ib_device *device = mad_agent_priv->agent.device;
682	u8 port_num;
683	struct ib_wc mad_wc;
684	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
685
686	if (device->node_type == RDMA_NODE_IB_SWITCH)
687		port_num = send_wr->wr.ud.port_num;
688	else
689		port_num = mad_agent_priv->agent.port_num;
690
691	/** Directed route handling starts if the initial LID routed part of
692	 * a request or the ending LID routed part of a response is empty.
693	 * If we are at the start of the LID routed part, don't update the
694	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.*/
695
696	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) !=
697	IB_LID_PERMISSIVE)
698		goto out;
699	if (smi_handle_dr_smp_send(smp, device->node_type, port_num)
700			== IB_SMI_DISCARD) {
701		ret = -EINVAL;
702		printf("Invalid directed route\n");
703		goto out;
704	}
705
706	/*Check to post send on QP or process locally*/
707	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD
708			&& smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
709		goto out;
710
711	local = malloc(sizeof *local);
712	if (!local) {
713		ret = -ENOMEM;
714		printf("No memory for ib_mad_local_private\n");
715		goto out;
716	}
717	local->mad_priv = NULL;
718	local->recv_mad_agent = NULL;
719	mad_priv = calloc(1, sizeof *mad_priv);/*kmem_cache_alloc(ib_mad_cache);*/
720	if (!mad_priv) {
721		ret = -ENOMEM;
722		printf("No memory for local response MAD\n");
723		free(local);
724		goto out;
725	}
726
727	build_smp_wc(mad_agent_priv->agent.qp, send_wr->wr_id,
728			be16_to_cpu(smp->dr_slid), send_wr->wr.ud.pkey_index,
729			send_wr->wr.ud.port_num, &mad_wc);
730
731	/*No GRH for DR SMP*/
732	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
733			(struct ib_mad *) smp, (struct ib_mad *) &mad_priv->mad);
734	switch (ret) {
735	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
736		if (ib_response_mad(&mad_priv->mad.mad)
737				&& mad_agent_priv->agent.recv_handler) {
738			local->mad_priv = mad_priv;
739			local->recv_mad_agent = mad_agent_priv;
740
741			/** Reference MAD agent until receive
742			 * side of local completion handled*/
743
744			/*atomic_inc(&mad_agent_priv->refcount);*/
745		} else
746			free(mad_priv);
747		/*kmem_cache_free(ib_mad_cache, mad_priv);*/
748		break;
749	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
750		free(mad_priv);
751		/*kmem_cache_free(ib_mad_cache, mad_priv);*/
752		break;
753	case IB_MAD_RESULT_SUCCESS:
754		/*Treat like an incoming receive MAD*/
755		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
756				mad_agent_priv->agent.port_num);
757		if (port_priv) {
758			memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
759			recv_mad_agent = find_mad_agent(port_priv, &mad_priv->mad.mad);
760		}
761		if (!port_priv || !recv_mad_agent) {
762
763			/** No receiving agent so drop packet and
764			 * generate send completion.*/
765
766			free(mad_priv);
767			/*kmem_cache_free(ib_mad_cache, mad_priv);*/
768			break;
769		}
770		local->mad_priv = mad_priv;
771		local->recv_mad_agent = recv_mad_agent;
772		break;
773	default:
774		/*kmem_cache_free(ib_mad_cache, mad_priv);*/
775		free(mad_priv);
776		free(local);
777		ret = -EINVAL;
778		goto out;
779	}
780
781	local->mad_send_wr = mad_send_wr;
782	/*Reference MAD agent until send side of local completion handled*/
783	/*atomic_inc(&mad_agent_priv->refcount);*/
784	/* Queue local completion to local list*/
785	/*spin_lock_irqsave(&mad_agent_priv->lock, flags);*/
786	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
787	/*spin_unlock_irqrestore(&mad_agent_priv->lock, flags);*/
788	queue_work(mad_agent_priv->qp_info->port_priv->wq,
789			&mad_agent_priv->local_work);
790	ret = 1;
791	out: return ret;
792}
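
/*
 * How a caller is expected to interpret the return value (this mirrors
 * the use in ib_post_send_mad() further below):
 *
 * ret = handle_outgoing_dr_smp(mad_agent_priv, mad_send_wr);
 * if (ret < 0)
 *	goto error;	// invalid directed route, allocation failure, ...
 * else if (ret == 1)
 *	continue;	// consumed locally, nothing to post on the QP
 * // ret == 0: fall through and post the SMP on the send queue
 */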
793
794static int get_pad_size(int hdr_len, int data_len) {
795	int seg_size, pad;
796
797	seg_size = sizeof(struct ib_mad) - hdr_len;
798	if (data_len && seg_size) {
799		pad = seg_size - data_len % seg_size;
800		return pad == seg_size ? 0 : pad;
801	} else
802		return seg_size;
803}
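
/*
 * Worked example (assuming the usual 256-byte struct ib_mad and the
 * 56-byte SA header length IB_MGMT_SA_HDR): seg_size = 256 - 56 = 200, so
 *
 * pad = get_pad_size(IB_MGMT_SA_HDR, 150);	// 200 - 150 % 200 = 50
 * pad = get_pad_size(IB_MGMT_SA_HDR, 200);	// exact multiple -> 0
 */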
804
805static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr) {
806	struct ib_rmpp_segment *s, *t;
807
808	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list)
809	{
810		list_del(&s->list);
811		free(s);
812	}
813}
814
815static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
816		gfp_t gfp_mask) {
817	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
818	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
819	struct ib_rmpp_segment *seg = NULL;
820	int left, seg_size, pad;
821
822	send_buf->seg_size = sizeof(struct ib_mad) - send_buf->hdr_len;
823	seg_size = send_buf->seg_size;
824	pad = send_wr->pad;
825
826	/*Allocate data segments.*/
827	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
828		seg = malloc(sizeof(*seg) + seg_size);
829		if (!seg) {
830			printf("alloc_send_rmpp_segs: RMPP mem "
831					"alloc failed for len %zd, gfp %#x\n",
832					sizeof(*seg) + seg_size, gfp_mask);
			/* Release any segments allocated so far before failing. */
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
836		}
837		seg->num = ++send_buf->seg_count;
838		list_add_tail(&seg->list, &send_wr->rmpp_list);
839	}
840
841	/* Zero any padding*/
842	if (pad)
843		memset(seg->data + seg_size - pad, 0, pad);
844
845	rmpp_mad->rmpp_hdr.rmpp_version =
846			send_wr->mad_agent_priv->agent.rmpp_version;
847	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
848	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
849
850	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
851			struct ib_rmpp_segment, list);
852	send_wr->last_ack_seg = send_wr->cur_seg;
853	return 0;
854}
855
856static inline int ib_get_payload_offset(
857		struct ib_mad_send_wr_private *mad_send_wr) {
858	if (mad_send_wr->send_buf.seg_count) {
859		assert(!"NYI");
860		return 0;
861		/*return ib_get_rmpp_segment(&mad_send_wr->send_buf, mad_send_wr->seg_num);*/
862	} else
863		return /*mad_send_wr->send_buf.mad +*/mad_send_wr->send_buf.hdr_len;
864}
865
866struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
867		u32 remote_qpn, u16 pkey_index, int rmpp_active, int hdr_len,
868		int data_len, gfp_t gfp_mask) {
869	struct ib_mad_agent_private *mad_agent_priv;
870	struct ib_mad_send_wr_private *mad_send_wr;
871	int pad, message_size, ret, size;
872	void *buf;
873	u64 t;
874
875	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
876			agent);
877	pad = get_pad_size(hdr_len, data_len);
878	message_size = hdr_len + data_len + pad;
879
880	if ((!mad_agent->rmpp_version
881			&& (rmpp_active || message_size > sizeof(struct ib_mad)))
882			|| (!rmpp_active && message_size > sizeof(struct ib_mad)))
883		return ERR_PTR(-EINVAL);
884
885	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
886
887	buf = dma_alloc(sizeof *mad_send_wr + size, &t);
888	if (!buf)
889		return ERR_PTR(-ENOMEM);
890
891	mad_send_wr = buf + size;
892	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
893	mad_send_wr->send_buf.mad = buf;
894	mad_send_wr->send_buf.hdr_len = hdr_len;
895	mad_send_wr->send_buf.data_len = data_len;
896	mad_send_wr->pad = pad;
897
898	mad_send_wr->mad_agent_priv = mad_agent_priv;
899	mad_send_wr->sg_list[0].length = hdr_len;
900	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
901	mad_send_wr->sg_list[0].addr = t;
902
903	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
904	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
905	mad_send_wr->sg_list[1].addr = t + ib_get_payload_offset(mad_send_wr);
906
907	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
908	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
909	mad_send_wr->send_wr.num_sge = 2;
910	mad_send_wr->send_wr.opcode = IB_WR_SEND;
911	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
912	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
913	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
914	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
915
916	if (rmpp_active) {
917		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
918		if (ret) {
919			free(buf);
920			return ERR_PTR(ret);
921		}
922	}
923
924	mad_send_wr->send_buf.mad_agent = mad_agent;
925	/*atomic_inc(&mad_agent_priv->refcount);*/
926	return &mad_send_wr->send_buf;
927}
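
/*
 * Illustrative sketch (not part of this port): building and posting one
 * non-RMPP MAD on an agent returned by ib_register_mad_agent().  The
 * address handle "ah", remote_qpn and pkey_index are assumed to be
 * provided by the caller; IB_MGMT_MAD_DATA and GFP_KERNEL are assumed to
 * come from the usual Linux headers; error handling is trimmed.
 *
 * struct ib_mad_send_buf *send_buf;
 *
 * send_buf = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *		IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_KERNEL);
 * if (IS_ERR(send_buf))
 *	return;
 *
 * // fill in send_buf->mad: class, method, TID, attribute, payload
 * send_buf->ah = ah;
 * send_buf->timeout_ms = 100;
 * send_buf->retries = 3;
 *
 * if (ib_post_send_mad(send_buf, NULL))
 *	ib_free_send_mad(send_buf);	// never posted, caller still owns it
 */
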
928/*
929 EXPORT_SYMBOL(ib_create_send_mad);
930
931 int ib_get_mad_data_offset(u8 mgmt_class)
932 {
933 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
934 return IB_MGMT_SA_HDR;
935 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
936 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
937 (mgmt_class == IB_MGMT_CLASS_BIS))
938 return IB_MGMT_DEVICE_HDR;
939 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
940 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
941 return IB_MGMT_VENDOR_HDR;
942 else
943 return IB_MGMT_MAD_HDR;
944 }
945 EXPORT_SYMBOL(ib_get_mad_data_offset);
946
947 int ib_is_mad_class_rmpp(u8 mgmt_class)
948 {
949 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
950 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
951 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
952 (mgmt_class == IB_MGMT_CLASS_BIS) ||
953 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
954 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
955 return 1;
956 return 0;
957 }
958 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
959
960 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num) {
961 struct ib_mad_send_wr_private *mad_send_wr;
962 struct list_head *list;
963
964 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
965 send_buf);
966 list = &mad_send_wr->cur_seg->list;
967
968 if (mad_send_wr->cur_seg->num < seg_num) {
969 list_for_each_entry(mad_send_wr->cur_seg, list, list)
970 if (mad_send_wr->cur_seg->num == seg_num)
971 break;
972 } else if (mad_send_wr->cur_seg->num > seg_num) {
973 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
974 if (mad_send_wr->cur_seg->num == seg_num)
975 break;
976 }
977 return mad_send_wr->cur_seg->data;
978 }
979
980 EXPORT_SYMBOL(ib_get_rmpp_segment);
981 */
982void ib_free_send_mad(struct ib_mad_send_buf *send_buf) {
983	struct ib_mad_agent_private *mad_agent_priv;
984	struct ib_mad_send_wr_private *mad_send_wr;
985
986	mad_agent_priv = container_of(send_buf->mad_agent,
987			struct ib_mad_agent_private, agent);
988	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
989			send_buf);
990
991	free_send_rmpp_list(mad_send_wr);
992	free(send_buf->mad);
993	deref_mad_agent(mad_agent_priv);
994}
995/*
996 EXPORT_SYMBOL(ib_free_send_mad);
997 */
998int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) {
999	struct ib_mad_qp_info *qp_info;
1000	struct list_head *list;
1001	struct ib_send_wr *bad_send_wr;
1002	struct ib_mad_agent *mad_agent;
1003	struct ib_sge *sge;
1004	/*unsigned long flags;*/
1005	int ret;
1006
1007	/*Set WR ID to find mad_send_wr upon completion*/
1008	qp_info = mad_send_wr->mad_agent_priv->qp_info;
1009	mad_send_wr->send_wr.wr_id = (unsigned long) &mad_send_wr->mad_list;
1010	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1011
1012	mad_agent = mad_send_wr->send_buf.mad_agent;
1013	sge = mad_send_wr->sg_list;
1014	/*sge[0].addr = ib_dma_map_single(mad_agent->device,
1015	 mad_send_wr->send_buf.mad, sge[0].length, DMA_TO_DEVICE);*/
1016	mad_send_wr->header_mapping = sge[0].addr;
1017
1018	/*sge[1].addr = ib_dma_map_single(mad_agent->device,
1019	 ib_get_payload(mad_send_wr), sge[1].length, DMA_TO_DEVICE);*/
1020	mad_send_wr->payload_mapping = sge[1].addr;
1021
1022	/*spin_lock_irqsave(&qp_info->send_queue.lock, flags);*/
1023	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1024		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr, &bad_send_wr);
1025		list = &qp_info->send_queue.list;
1026	} else {
1027		ret = 0;
1028		list = &qp_info->overflow_list;
1029	}
1030
1031	if (!ret) {
1032		qp_info->send_queue.count++;
1033		list_add_tail(&mad_send_wr->mad_list.list, list);
1034	}
1035	/*spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1036	 if (ret) {
1037	 ib_dma_unmap_single(mad_agent->device, mad_send_wr->header_mapping,
1038	 sge[0].length, DMA_TO_DEVICE);
1039	 ib_dma_unmap_single(mad_agent->device, mad_send_wr->payload_mapping,
1040	 sge[1].length, DMA_TO_DEVICE);
1041	 }*/
1042	return ret;
1043}
1044/*
1045
1046 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1047 *  with the registered client
1048 */
1049int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1050		struct ib_mad_send_buf **bad_send_buf) {
1051	struct ib_mad_agent_private *mad_agent_priv;
1052	struct ib_mad_send_buf *next_send_buf;
1053	struct ib_mad_send_wr_private *mad_send_wr;
1054	/*unsigned long flags;*/
1055	int ret = -EINVAL;
1056
1057	/*Walk list of send WRs and post each on send list*/
1058	for (; send_buf; send_buf = next_send_buf) {
1059
1060		mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1061				send_buf);
1062		mad_agent_priv = mad_send_wr->mad_agent_priv;
1063
1064		if (!send_buf->mad_agent->send_handler
1065				|| (send_buf->timeout_ms && !send_buf->mad_agent->recv_handler)) {
1066			ret = -EINVAL;
1067			goto error;
1068		}
1069
1070		/*XXX: should be impl*/
1071		/*if (!ib_is_mad_class_rmpp(
1072		 ((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1073		 if (mad_agent_priv->agent.rmpp_version) {
1074		 ret = -EINVAL;
1075		 goto error;
1076		 }
1077		 }*/
1078
1079		/** Save pointer to next work request to post in case the
1080		 * current one completes, and the user modifies the work
1081		 * request associated with the completion*/
1082
1083		next_send_buf = send_buf->next;
1084		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1085
1086		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1087		IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1088			ret = handle_outgoing_dr_smp(mad_agent_priv, mad_send_wr);
1089			if (ret < 0) /*error*/
1090				goto error;
1091			else if (ret == 1) /*locally consumed*/
1092				continue;
1093		}
1094
1095		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1096		/*Timeout will be updated after send completes*/
1097		/*XXX*/
1098		/*mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);*/
1099		mad_send_wr->max_retries = send_buf->retries;
1100		mad_send_wr->retries_left = send_buf->retries;
1101		send_buf->retries = 0;
1102		/*Reference for work request to QP + response*/
1103		/*XXX*/
1104		/*mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);*/
1105		mad_send_wr->status = IB_WC_SUCCESS;
1106
1107		/*Reference MAD agent until send completes*/
1108		/*XXX*/
1109		/*atomic_inc(&mad_agent_priv->refcount);
1110		 spin_lock_irqsave(&mad_agent_priv->lock, flags);*/
1111		list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->send_list);
1112		/*spin_unlock_irqrestore(&mad_agent_priv->lock, flags);*/
1113
1114		if (mad_agent_priv->agent.rmpp_version) {
1115			assert(!"NYI");
1116			/*ret = ib_send_rmpp_mad(mad_send_wr);
1117			 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1118			 ret = ib_send_mad(mad_send_wr);*/
1119		} else
1120			ret = ib_send_mad(mad_send_wr);
1121		if (ret < 0) {
1122			/*Fail send request*/
1123			/*spin_lock_irqsave(&mad_agent_priv->lock, flags);*/
1124			list_del(&mad_send_wr->agent_list);
1125			/*spin_unlock_irqrestore(&mad_agent_priv->lock, flags);*/
1126			/*atomic_dec(&mad_agent_priv->refcount);*/
1127			goto error;
1128		}
1129	}
1130	return 0;
1131	error: if (bad_send_buf)
1132		*bad_send_buf = send_buf;
1133	return ret;
1134}
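
/*
 * Illustrative note on the list semantics (variable names hypothetical):
 * several send buffers can be chained through send_buf->next and posted
 * in one call.  On failure, *bad_send_buf points at the first buffer
 * that was not accepted, so only that buffer and its successors still
 * belong to the caller.
 *
 * first->next = second;
 * second->next = NULL;
 * if (ib_post_send_mad(first, &failed)) {
 *	for (buf = failed; buf; buf = next) {
 *		next = buf->next;
 *		ib_free_send_mad(buf);
 *	}
 * }
 */
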
1135/*
1136 EXPORT_SYMBOL(ib_post_send_mad);
1137
1138
1139 * ib_free_recv_mad - Returns data buffers used to receive
1140 *  a MAD to the access layer
1141
1142 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1143 {
1144 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1145 struct ib_mad_private_header *mad_priv_hdr;
1146 struct ib_mad_private *priv;
1147 struct list_head free_list;
1148
1149 INIT_LIST_HEAD(&free_list);
1150 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1151
1152 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1153 &free_list, list) {
1154 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1155 recv_buf);
1156 mad_priv_hdr = container_of(mad_recv_wc,
1157 struct ib_mad_private_header,
1158 recv_wc);
1159 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1160 header);
1161 kmem_cache_free(ib_mad_cache, priv);
1162 }
1163 }
1164 EXPORT_SYMBOL(ib_free_recv_mad);
1165
1166 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1167 u8 rmpp_version,
1168 ib_mad_send_handler send_handler,
1169 ib_mad_recv_handler recv_handler,
1170 void *context)
1171 {
1172 return ERR_PTR(-EINVAL);	 XXX: for now
1173 }
1174 EXPORT_SYMBOL(ib_redirect_mad_qp);
1175
1176 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1177 struct ib_wc *wc)
1178 {
1179 printf( "ib_process_mad_wc() not implemented yet\n");
1180 return 0;
1181 }
1182 EXPORT_SYMBOL(ib_process_mad_wc);
1183 */
1184static int method_in_use(struct ib_mad_mgmt_method_table **method,
1185		struct ib_mad_reg_req *mad_reg_req) {
1186	int i;
1187
1188	for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
1189			i < IB_MGMT_MAX_METHODS;
1190			i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1191					1 + i)) {
1192		if ((*method)->agent[i]) {
1193			printf("Method %d already in use\n", i);
1194			return -EINVAL;
1195		}
1196	}
1197	return 0;
1198}
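
/*
 * Illustrative sketch (not part of this port): method_mask is a bitmap
 * indexed by MAD method, so a client that wants to own Get/Set for a
 * class fills its ib_mad_reg_req roughly as below before registering.
 * The IB_MGMT_* constants are assumed from <rdma/ib_mad.h>, set_bit()
 * from the bitops header already included above.
 *
 * struct ib_mad_reg_req reg_req;
 *
 * memset(&reg_req, 0, sizeof reg_req);
 * reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
 * reg_req.mgmt_class_version = 1;
 * set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 * set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
 */
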
1199/*
1200 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1201 {
1202 Allocate management method table
1203 *method = calloc(1,sizeof **method);
1204 if (!*method) {
1205 printf( "No memory for "
1206 "ib_mad_mgmt_method_table\n");
1207 return -ENOMEM;
1208 }
1209
1210 return 0;
1211 }
1212
1213
1214 * Check to see if there are any methods still in use
1215
1216 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1217 {
1218 int i;
1219
1220 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1221 if (method->agent[i])
1222 return 1;
1223 return 0;
1224 }
1225
1226
1227 * Check to see if there are any method tables for this class still in use
1228
1229 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1230 {
1231 int i;
1232
1233 for (i = 0; i < MAX_MGMT_CLASS; i++)
1234 if (class->method_table[i])
1235 return 1;
1236 return 0;
1237 }
1238
1239 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1240 {
1241 int i;
1242
1243 for (i = 0; i < MAX_MGMT_OUI; i++)
1244 if (vendor_class->method_table[i])
1245 return 1;
1246 return 0;
1247 }
1248 */
1249static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1250		u8 *oui) {
1251	int i;
1252
1253	for (i = 0; i < MAX_MGMT_OUI; i++)
1254		/* Is there matching OUI for this vendor class ?*/
1255		if (!memcmp(vendor_class->oui[i], oui, 3))
1256			return i;
1257
1258	return -1;
1259}
1260/*
1261 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1262 {
1263 int i;
1264
1265 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1266 if (vendor->vendor_class[i])
1267 return 1;
1268
1269 return 0;
1270 }
1271
1272 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1273 struct ib_mad_agent_private *agent)
1274 {
1275 int i;
1276
1277 Remove any methods for this mad agent
1278 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1279 if (method->agent[i] == agent) {
1280 method->agent[i] = NULL;
1281 }
1282 }
1283 }
1284
1285 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1286 struct ib_mad_agent_private *agent_priv, u8 mgmt_class) {
1287 struct ib_mad_port_private *port_priv;
1288 struct ib_mad_mgmt_class_table **class;
1289 struct ib_mad_mgmt_method_table **method;
1290 int i, ret;
1291
1292 port_priv = agent_priv->qp_info->port_priv;
1293 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1294 if (!*class) {
1295 Allocate management class table for "new" class version
1296 *class = calloc(1, sizeof **class);
1297 if (!*class) {
1298 printf("No memory for "
1299 "ib_mad_mgmt_class_table\n");
1300 ret = -ENOMEM;
1301 goto error1;
1302 }
1303
1304 Allocate method table for this management class
1305 method = &(*class)->method_table[mgmt_class];
1306 if ((ret = allocate_method_table(method)))
1307 goto error2;
1308 } else {
1309 method = &(*class)->method_table[mgmt_class];
1310 if (!*method) {
1311 Allocate method table for this management class
1312 if ((ret = allocate_method_table(method)))
1313 goto error1;
1314 }
1315 }
1316
1317 Now, make sure methods are not already in use
1318 if (method_in_use(method, mad_reg_req))
1319 goto error3;
1320
1321 Finally, add in methods being registered
1322 for (i = find_first_bit(mad_reg_req->method_mask,
1323 IB_MGMT_MAX_METHODS); i < IB_MGMT_MAX_METHODS;
1324 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1325 1 + i)) {
1326 (*method)->agent[i] = agent_priv;
1327 }
1328 return 0;
1329
1330 error3:
1331 Remove any methods for this mad agent
1332 remove_methods_mad_agent(*method, agent_priv);
1333 Now, check to see if there are any methods in use
1334 if (!check_method_table(*method)) {
1335 If not, release management method table
1336 free(*method);
1337 *method = NULL;
1338 }
1339 ret = -EINVAL;
1340 goto error1;
1341 error2: free(*class);
1342 *class = NULL;
1343 error1: return ret;
1344 }
1345
1346 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1347 struct ib_mad_agent_private *agent_priv)
1348 {
1349 struct ib_mad_port_private *port_priv;
1350 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1351 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1352 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1353 struct ib_mad_mgmt_method_table **method;
1354 int i, ret = -ENOMEM;
1355 u8 vclass;
1356
1357 "New" vendor (with OUI) class
1358 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1359 port_priv = agent_priv->qp_info->port_priv;
1360 vendor_table = &port_priv->version[
1361 mad_reg_req->mgmt_class_version].vendor;
1362 if (!*vendor_table) {
1363 Allocate mgmt vendor class table for "new" class version
1364 vendor = calloc(1,sizeof *vendor);
1365 if (!vendor) {
1366 printf( "No memory for "
1367 "ib_mad_mgmt_vendor_class_table\n");
1368 goto error1;
1369 }
1370
1371 *vendor_table = vendor;
1372 }
1373 if (!(*vendor_table)->vendor_class[vclass]) {
1374 Allocate table for this management vendor class
1375 vendor_class = calloc(1,sizeof *vendor_class);
1376 if (!vendor_class) {
1377 printf( "No memory for "
1378 "ib_mad_mgmt_vendor_class\n");
1379 goto error2;
1380 }
1381
1382 (*vendor_table)->vendor_class[vclass] = vendor_class;
1383 }
1384 for (i = 0; i < MAX_MGMT_OUI; i++) {
1385 Is there matching OUI for this vendor class ?
1386 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1387 mad_reg_req->oui, 3)) {
1388 method = &(*vendor_table)->vendor_class[
1389 vclass]->method_table[i];
1390 BUG_ON(!*method);
1391 goto check_in_use;
1392 }
1393 }
1394 for (i = 0; i < MAX_MGMT_OUI; i++) {
1395 OUI slot available ?
1396 if (!is_vendor_oui((*vendor_table)->vendor_class[
1397 vclass]->oui[i])) {
1398 method = &(*vendor_table)->vendor_class[
1399 vclass]->method_table[i];
1400 BUG_ON(*method);
1401 Allocate method table for this OUI
1402 if ((ret = allocate_method_table(method)))
1403 goto error3;
1404 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1405 mad_reg_req->oui, 3);
1406 goto check_in_use;
1407 }
1408 }
1409 printf( "All OUI slots in use\n");
1410 goto error3;
1411
1412 check_in_use:
1413 Now, make sure methods are not already in use
1414 if (method_in_use(method, mad_reg_req))
1415 goto error4;
1416
1417 Finally, add in methods being registered
1418 for (i = find_first_bit(mad_reg_req->method_mask,
1419 IB_MGMT_MAX_METHODS);
1420 i < IB_MGMT_MAX_METHODS;
1421 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1422 1+i)) {
1423 (*method)->agent[i] = agent_priv;
1424 }
1425 return 0;
1426
1427 error4:
1428 Remove any methods for this mad agent
1429 remove_methods_mad_agent(*method, agent_priv);
1430 Now, check to see if there are any methods in use
1431 if (!check_method_table(*method)) {
1432 If not, release management method table
1433 free(*method);
1434 *method = NULL;
1435 }
1436 ret = -EINVAL;
1437 error3:
1438 if (vendor_class) {
1439 (*vendor_table)->vendor_class[vclass] = NULL;
1440 free(vendor_class);
1441 }
1442 error2:
1443 if (vendor) {
1444 *vendor_table = NULL;
1445 free(vendor);
1446 }
1447 error1:
1448 return ret;
1449 }
1450
1451 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1452 {
1453 struct ib_mad_port_private *port_priv;
1454 struct ib_mad_mgmt_class_table *class;
1455 struct ib_mad_mgmt_method_table *method;
1456 struct ib_mad_mgmt_vendor_class_table *vendor;
1457 struct ib_mad_mgmt_vendor_class *vendor_class;
1458 int index;
1459 u8 mgmt_class;
1460
1461
1462 * Was MAD registration request supplied
1463 * with original registration ?
1464
1465 if (!agent_priv->reg_req) {
1466 goto out;
1467 }
1468
1469 port_priv = agent_priv->qp_info->port_priv;
1470 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1471 class = port_priv->version[
1472 agent_priv->reg_req->mgmt_class_version].class;
1473 if (!class)
1474 goto vendor_check;
1475
1476 method = class->method_table[mgmt_class];
1477 if (method) {
1478 Remove any methods for this mad agent
1479 remove_methods_mad_agent(method, agent_priv);
1480 Now, check to see if there are any methods still in use
1481 if (!check_method_table(method)) {
1482 If not, release management method table
1483 free(method);
1484 class->method_table[mgmt_class] = NULL;
1485 Any management classes left ?
1486 if (!check_class_table(class)) {
1487 If not, release management class table
1488 free(class);
1489 port_priv->version[
1490 agent_priv->reg_req->
1491 mgmt_class_version].class = NULL;
1492 }
1493 }
1494 }
1495
1496 vendor_check:
1497 if (!is_vendor_class(mgmt_class))
1498 goto out;
1499
1500 normalize mgmt_class to vendor range 2
1501 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1502 vendor = port_priv->version[
1503 agent_priv->reg_req->mgmt_class_version].vendor;
1504
1505 if (!vendor)
1506 goto out;
1507
1508 vendor_class = vendor->vendor_class[mgmt_class];
1509 if (vendor_class) {
1510 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1511 if (index < 0)
1512 goto out;
1513 method = vendor_class->method_table[index];
1514 if (method) {
1515 Remove any methods for this mad agent
1516 remove_methods_mad_agent(method, agent_priv);
1517
1518 * Now, check to see if there are
1519 * any methods still in use
1520
1521 if (!check_method_table(method)) {
1522 If not, release management method table
1523 free(method);
1524 vendor_class->method_table[index] = NULL;
1525 memset(vendor_class->oui[index], 0, 3);
1526 Any OUIs left ?
1527 if (!check_vendor_class(vendor_class)) {
1528 If not, release vendor class table
1529 free(vendor_class);
1530 vendor->vendor_class[mgmt_class] = NULL;
1531 Any other vendor classes left ?
1532 if (!check_vendor_table(vendor)) {
1533 free(vendor);
1534 port_priv->version[
1535 agent_priv->reg_req->
1536 mgmt_class_version].
1537 vendor = NULL;
1538 }
1539 }
1540 }
1541 }
1542 }
1543
1544 out:
1545 return;
1546 }
1547 */
1548static struct ib_mad_agent_private *find_mad_agent(
1549		struct ib_mad_port_private *port_priv, struct ib_mad *mad) {
1550	struct ib_mad_agent_private *mad_agent = NULL;
1551	/*unsigned long flags;*/
1552
1553	/*spin_lock_irqsave(&port_priv->reg_lock, flags);*/
1554	if (ib_response_mad(mad)) {
1555		u32 hi_tid;
1556		struct ib_mad_agent_private *entry;
1557
1558		/** Routing is based on high 32 bits of transaction ID
1559		 * of MAD.*/
1560
1561		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1562		list_for_each_entry(entry, &port_priv->agent_list, agent_list)
1563		{
1564			if (entry->agent.hi_tid == hi_tid) {
1565				mad_agent = entry;
1566				break;
1567			}
1568		}
1569	} else {
1570		struct ib_mad_mgmt_class_table *class;
1571		struct ib_mad_mgmt_method_table *method;
1572		struct ib_mad_mgmt_vendor_class_table *vendor;
1573		struct ib_mad_mgmt_vendor_class *vendor_class;
1574		struct ib_vendor_mad *vendor_mad;
1575		int index;
1576
1577		/** Routing is based on version, class, and method
1578		 * For "newer" vendor MADs, also based on OUI*/
1579
1580		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1581			goto out;
1582		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1583			class = port_priv->version[mad->mad_hdr.class_version].class;
1584			if (!class)
1585				goto out;
1586			method = class->method_table[convert_mgmt_class(
1587					mad->mad_hdr.mgmt_class)];
1588			if (method)
1589				mad_agent = method->agent[mad->mad_hdr.method
1590						& ~IB_MGMT_METHOD_RESP];
1591		} else {
1592			vendor = port_priv->version[mad->mad_hdr.class_version].vendor;
1593			if (!vendor)
1594				goto out;
1595			vendor_class = vendor->vendor_class[vendor_class_index(
1596					mad->mad_hdr.mgmt_class)];
1597			if (!vendor_class)
1598				goto out;
1599			/*Find matching OUI*/
1600			vendor_mad = (struct ib_vendor_mad *) mad;
1601			index = find_vendor_oui(vendor_class, vendor_mad->oui);
1602			if (index == -1)
1603				goto out;
1604			method = vendor_class->method_table[index];
1605			if (method) {
1606				mad_agent = method->agent[mad->mad_hdr.method
1607						& ~IB_MGMT_METHOD_RESP];
1608			}
1609		}
1610	}
1611
	if (mad_agent) {
		if (mad_agent->agent.recv_handler) {
			/*atomic_inc(&mad_agent->refcount);*/
		} else {
			printf("No receive handler for client "
					"%p on port %d\n", &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
1621	out: /*spin_unlock_irqrestore(&port_priv->reg_lock, flags);*/
1622
1623	return mad_agent;
1624}
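
/*
 * Response routing depends on the transaction ID convention used when
 * the request was built: the upper 32 bits of the TID carry the sending
 * agent's hi_tid (assigned in ib_register_mad_agent()), the lower 32
 * bits are chosen by the client.  Illustrative sketch, my_tid is a
 * hypothetical per-client counter:
 *
 * mad_hdr->tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | my_tid++);
 *
 * A response carrying that TID is matched back to the registering agent
 * by the hi_tid comparison in the loop above.
 */
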
1625/*
1626 static int validate_mad(struct ib_mad *mad, u32 qp_num)
1627 {
1628 int valid = 0;
1629
1630 Make sure MAD base version is understood
1631 if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1632 printf( "MAD received with unsupported base "
1633 "version %d\n", mad->mad_hdr.base_version);
1634 goto out;
1635 }
1636
1637 Filter SMI packets sent to other than QP0
1638 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1639 (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1640 if (qp_num == 0)
1641 valid = 1;
1642 } else {
1643 Filter GSI packets sent to QP0
1644 if (qp_num != 0)
1645 valid = 1;
1646 }
1647
1648 out:
1649 return valid;
1650 }
1651
1652 static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1653 struct ib_mad_hdr *mad_hdr)
1654 {
1655 struct ib_rmpp_mad *rmpp_mad;
1656
1657 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1658 return !mad_agent_priv->agent.rmpp_version ||
1659 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1660 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1661 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1662 }
1663
1664 static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1665 struct ib_mad_recv_wc *rwc)
1666 {
1667 return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1668 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1669 }
1670
1671 static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1672 struct ib_mad_send_wr_private *wr,
1673 struct ib_mad_recv_wc *rwc )
1674 {
1675 struct ib_ah_attr attr;
1676 u8 send_resp, rcv_resp;
1677 union ib_gid sgid;
1678 struct ib_device *device = mad_agent_priv->agent.device;
1679 u8 port_num = mad_agent_priv->agent.port_num;
1680 u8 lmc;
1681
1682 send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
1683 rcv_resp = ib_response_mad(rwc->recv_buf.mad);
1684
1685 if (send_resp == rcv_resp)
1686 both requests, or both responses. GIDs different
1687 return 0;
1688
1689 if (ib_query_ah(wr->send_buf.ah, &attr))
1690 Assume not equal, to avoid false positives.
1691 return 0;
1692
1693 if (!!(attr.ah_flags & IB_AH_GRH) !=
1694 !!(rwc->wc->wc_flags & IB_WC_GRH))
1695 one has GID, other does not.  Assume different
1696 return 0;
1697
1698 if (!send_resp && rcv_resp) {
1699 is request/response.
1700 if (!(attr.ah_flags & IB_AH_GRH)) {
1701 if (ib_get_cached_lmc(device, port_num, &lmc))
1702 return 0;
1703 return (!lmc || !((attr.src_path_bits ^
1704 rwc->wc->dlid_path_bits) &
1705 ((1 << lmc) - 1)));
1706 } else {
1707 if (ib_get_cached_gid(device, port_num,
1708 attr.grh.sgid_index, &sgid))
1709 return 0;
1710 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1711 16);
1712 }
1713 }
1714
1715 if (!(attr.ah_flags & IB_AH_GRH))
1716 return attr.dlid == rwc->wc->slid;
1717 else
1718 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1719 16);
1720 }
1721
1722 static inline int is_direct(u8 class)
1723 {
1724 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1725 }
1726
1727 struct ib_mad_send_wr_private*
1728 ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1729 struct ib_mad_recv_wc *wc)
1730 {
1731 struct ib_mad_send_wr_private *wr;
1732 struct ib_mad *mad;
1733
1734 mad = (struct ib_mad *)wc->recv_buf.mad;
1735
1736 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1737 if ((wr->tid == mad->mad_hdr.tid) &&
1738 rcv_has_same_class(wr, wc) &&
1739
1740 * Don't check GID for direct routed MADs.
1741 * These might have permissive LIDs.
1742
1743 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1744 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1745 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1746 }
1747
1748
1749 * It's possible to receive the response before we've
1750 * been notified that the send has completed
1751
1752 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1753 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1754 wr->tid == mad->mad_hdr.tid &&
1755 wr->timeout &&
1756 rcv_has_same_class(wr, wc) &&
1757
1758 * Don't check GID for direct routed MADs.
1759 * These might have permissive LIDs.
1760
1761 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1762 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1763 Verify request has not been canceled
1764 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1765 }
1766 return NULL;
1767 }
1768
1769 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1770 {
1771 mad_send_wr->timeout = 0;
1772 if (mad_send_wr->refcount == 1)
1773 list_move_tail(&mad_send_wr->agent_list,
1774 &mad_send_wr->mad_agent_priv->done_list);
1775 }
1776
1777 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1778 struct ib_mad_recv_wc *mad_recv_wc)
1779 {
1780 struct ib_mad_send_wr_private *mad_send_wr;
1781 struct ib_mad_send_wc mad_send_wc;
1782 unsigned long flags;
1783
1784 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1785 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1786 if (mad_agent_priv->agent.rmpp_version) {
1787 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1788 mad_recv_wc);
1789 if (!mad_recv_wc) {
1790 deref_mad_agent(mad_agent_priv);
1791 return;
1792 }
1793 }
1794
1795 Complete corresponding request
1796 if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1797 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1798 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1799 if (!mad_send_wr) {
1800 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1801 ib_free_recv_mad(mad_recv_wc);
1802 deref_mad_agent(mad_agent_priv);
1803 return;
1804 }
1805 ib_mark_mad_done(mad_send_wr);
1806 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1807
1808 Defined behavior is to complete response before request
1809 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1810 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1811 mad_recv_wc);
1812 atomic_dec(&mad_agent_priv->refcount);
1813
1814 mad_send_wc.status = IB_WC_SUCCESS;
1815 mad_send_wc.vendor_err = 0;
1816 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1817 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1818 } else {
1819 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1820 mad_recv_wc);
1821 deref_mad_agent(mad_agent_priv);
1822 }
1823 }
1824 */
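/*
 * Receive completion path: recover the posted ib_mad_private buffer from
 * the work request id, run directed-route SMPs through the SMI helpers
 * (forwarding them on switches where needed), give the device driver the
 * first chance to consume or answer the MAD via process_mad(), and
 * finally look up a matching registered MAD agent.
 */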
1825static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1826		struct ib_wc *wc) {
1827	struct ib_mad_qp_info *qp_info;
1828	struct ib_mad_private_header *mad_priv_hdr;
1829	struct ib_mad_private *recv, *response = NULL;
1830	struct ib_mad_list_head *mad_list;
1831	struct ib_mad_agent_private *mad_agent;
1832	int port_num;
1833
1834	mad_list = (struct ib_mad_list_head *) (unsigned long) wc->wr_id;
1835	qp_info = mad_list->mad_queue->qp_info;
1836	dequeue_mad(mad_list);
1837
1838	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1839			mad_list);
1840	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	/*ib_dma_unmap_single(port_priv->device, recv->header.mapping,
	 sizeof(struct ib_mad_private) - sizeof(struct ib_mad_private_header),
	 DMA_FROM_DEVICE);*/
1844
1845	/*Setup MAD receive work completion from "normal" work completion*/
1846	recv->header.wc = *wc;
1847	recv->header.recv_wc.wc = &recv->header.wc;
1848	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1849	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1850	recv->header.recv_wc.recv_buf.grh = &recv->grh;
1851
	/*if (atomic_read(&qp_info->snoop_count))
	 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);*/
1854
1855	/*Validate MAD*/
	/*if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
	 goto out;*/
1858	response = calloc(1, sizeof *response);	//kmem_cache_alloc(ib_mad_cache);
1859	if (!response) {
1860		printf("ib_mad_recv_done_handler no memory "
1861				"for response buffer\n");
1862		goto out;
1863	}
1864
1865	if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1866		port_num = wc->port_num;
1867	else
1868		port_num = port_priv->port_num;
1869
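	/* Directed route SMPs are validated first and, on a switch, may be
	 * forwarded to another port before any local processing. */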
1870	if (recv->mad.mad.mad_hdr.mgmt_class ==
1871	IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1872		enum smi_forward_action retsmi;
1873
1874		if (smi_handle_dr_smp_recv(&recv->mad.smp, port_priv->device->node_type,
1875				port_num, port_priv->device->phys_port_cnt) == IB_SMI_DISCARD)
1876			goto out;
1877
1878		retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
1879		if (retsmi == IB_SMI_LOCAL)
1880			goto local;
1881
		if (retsmi == IB_SMI_SEND) { /* don't forward */
1884			if (smi_handle_dr_smp_send(&recv->mad.smp,
1885					port_priv->device->node_type, port_num) == IB_SMI_DISCARD)
1886				goto out;
1887
1888			if (smi_check_local_smp(&recv->mad.smp, port_priv->device)
1889					== IB_SMI_DISCARD)
1890				goto out;
1891		} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1892			/*forward case for switches*/
1893			memcpy(response, recv, sizeof(*response));
1894			response->header.recv_wc.wc = &response->header.wc;
1895			response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1896			response->header.recv_wc.recv_buf.grh = &response->grh;
1897
1898			agent_send_response(&response->mad.mad, &response->grh, wc,
1899					port_priv->device, smi_get_fwd_port(&recv->mad.smp),
1900					qp_info->qp->qp_num);
1901
1902			goto out;
1903		}
1904	}
1905
1906	local:
1907	/*Give driver "right of first refusal" on incoming MAD*/
1908	if (port_priv->device->process_mad) {
1909		int ret;
1910
1911		ret = port_priv->device->process_mad(port_priv->device, 0,
1912				port_priv->port_num, wc, &recv->grh, &recv->mad.mad,
1913				&response->mad.mad);
1914		if (ret & IB_MAD_RESULT_SUCCESS) {
1915			if (ret & IB_MAD_RESULT_CONSUMED)
1916				goto out;
1917			if (ret & IB_MAD_RESULT_REPLY) {
1918				agent_send_response(&response->mad.mad, &recv->grh, wc,
1919						port_priv->device, port_num, qp_info->qp->qp_num);
1920				goto out;
1921			}
1922		}
1923	}
1924
1925	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1926	if (mad_agent) {
1927		//ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1928
		/* In the kernel code, recv is freed on error inside
		 * ib_mad_complete_recv() or handed off via its recv_handler.
		 * That call is disabled in this port, so the buffer is simply
		 * dropped here until reposting/freeing is wired up. */
		recv = NULL;
1933	}
1934
1935	out:
	/* Post another receive request for this QP - reposting (and freeing of
	 * consumed buffers) is still disabled in this port: */
	/*if (response) {
1938	 ib_mad_post_receive_mads(qp_info, response);
1939	 if (recv)
1940	 kmem_cache_free(ib_mad_cache, recv);
1941	 } else
1942	 ib_mad_post_receive_mads(qp_info, recv);*/
1943	return;
1944}
1945/*
1946 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1947 {
1948 struct ib_mad_send_wr_private *mad_send_wr;
1949
1950 if (list_empty(&mad_agent_priv->wait_list)) {
1951 del_timer(&mad_agent_priv->timeout_timer);
1952 } else {
1953 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1954 struct ib_mad_send_wr_private,
1955 agent_list);
1956
1957 if (time_after(mad_agent_priv->timeout,
1958 mad_send_wr->timeout)) {
1959 mad_agent_priv->timeout = mad_send_wr->timeout;
1960 mod_timer(&mad_agent_priv->timeout_timer,
1961 mad_send_wr->timeout);
1962 }
1963 }
1964 }
1965 */
1966/*static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) {
1967 struct ib_mad_agent_private *mad_agent_priv;
1968 struct ib_mad_send_wr_private *temp_mad_send_wr;
1969 struct list_head *list_item;
1970 unsigned long delay;
1971
1972 mad_agent_priv = mad_send_wr->mad_agent_priv;
1973 list_del(&mad_send_wr->agent_list);
1974
1975 delay = mad_send_wr->timeout;
1976 mad_send_wr->timeout += jiffies;
1977
1978 if (delay) {
1979 list_for_each_prev(list_item, &mad_agent_priv->wait_list)
1980 {
1981 temp_mad_send_wr = list_entry(list_item,
1982 struct ib_mad_send_wr_private, agent_list);
1983 if (time_after(mad_send_wr->timeout, temp_mad_send_wr->timeout))
1984 break;
1985 }
1986 } else
1987 list_item = &mad_agent_priv->wait_list;
1988 list_add(&mad_send_wr->agent_list, list_item);
1989
1990 Reschedule a work item if we have a shorter timeout
1991 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
1992 mod_timer(&mad_agent_priv->timeout_timer, mad_send_wr->timeout);
1993 }*/
1994/*
1995 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
1996 int timeout_ms)
1997 {
1998 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
1999 wait_for_response(mad_send_wr);
2000 }
2001
2002
2003 * Process a send work completion
2004 */
2005/*void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2006 struct ib_mad_send_wc *mad_send_wc) {
2007 struct ib_mad_agent_private *mad_agent_priv;
2008 unsigned long flags;
2009 int ret;
2010
2011 mad_agent_priv = mad_send_wr->mad_agent_priv;
2012 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2013 if (mad_agent_priv->agent.rmpp_version) {
2014 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2015 if (ret == IB_RMPP_RESULT_CONSUMED)
2016 goto done;
2017 } else
2018 ret = IB_RMPP_RESULT_UNHANDLED;
2019
2020 if (mad_send_wc->status != IB_WC_SUCCESS
2021 && mad_send_wr->status == IB_WC_SUCCESS) {
2022 mad_send_wr->status = mad_send_wc->status;
2023 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2024 }
2025
2026 if (--mad_send_wr->refcount > 0) {
2027 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout
2028 && mad_send_wr->status == IB_WC_SUCCESS) {
2029 wait_for_response(mad_send_wr);
2030 }
2031 goto done;
2032 }
2033
2034 Remove send from MAD agent and notify client of completion
2035 list_del(&mad_send_wr->agent_list);
2036 adjust_timeout(mad_agent_priv);
2037 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2038
2039 if (mad_send_wr->status != IB_WC_SUCCESS)
2040 mad_send_wc->status = mad_send_wr->status;
2041 if (ret == IB_RMPP_RESULT_INTERNAL)
2042 ib_rmpp_send_handler(mad_send_wc);
2043 else
2044 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, mad_send_wc);
2045
2046 Release reference on agent taken when sending
2047 deref_mad_agent(mad_agent_priv);
2048 return;
2049 done: spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2050 }*/
2051
2052/*static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2053 struct ib_wc *wc) {
2054 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2055 struct ib_mad_list_head *mad_list;
2056 struct ib_mad_qp_info *qp_info;
2057 struct ib_mad_queue *send_queue;
2058 struct ib_send_wr *bad_send_wr;
2059 struct ib_mad_send_wc mad_send_wc;
2060 unsigned long flags;
2061 int ret;
2062
2063 mad_list = (struct ib_mad_list_head *) (unsigned long) wc->wr_id;
2064 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2065 mad_list);
2066 send_queue = mad_list->mad_queue;
2067 qp_info = send_queue->qp_info;
2068
2069 retry: ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2070 mad_send_wr->header_mapping, mad_send_wr->sg_list[0].length,
2071 DMA_TO_DEVICE);
2072 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2073 mad_send_wr->payload_mapping, mad_send_wr->sg_list[1].length,
2074 DMA_TO_DEVICE);
2075 queued_send_wr = NULL;
2076 spin_lock_irqsave(&send_queue->lock, flags);
2077 list_del(&mad_list->list);
2078
2079 Move queued send to the send queue
2080 if (send_queue->count-- > send_queue->max_active) {
2081 mad_list = container_of(qp_info->overflow_list.next,
2082 struct ib_mad_list_head, list);
2083 queued_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2084 mad_list);
2085 list_move_tail(&mad_list->list, &send_queue->list);
2086 }
2087 spin_unlock_irqrestore(&send_queue->lock, flags);
2088
2089 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2090 mad_send_wc.status = wc->status;
2091 mad_send_wc.vendor_err = wc->vendor_err;
2092 if (atomic_read(&qp_info->snoop_count))
2093 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2094 IB_MAD_SNOOP_SEND_COMPLETIONS);
2095 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2096
2097 if (queued_send_wr) {
2098 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, &bad_send_wr);
2099 if (ret) {
2100 printf("ib_post_send failed: %d\n", ret);
2101 mad_send_wr = queued_send_wr;
2102 wc->status = IB_WC_LOC_QP_OP_ERR;
2103 goto retry;
2104 }
2105 }
2106 }*/
2107/*
2108 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2109 {
2110 struct ib_mad_send_wr_private *mad_send_wr;
2111 struct ib_mad_list_head *mad_list;
2112 unsigned long flags;
2113
2114 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2115 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2116 mad_send_wr = container_of(mad_list,
2117 struct ib_mad_send_wr_private,
2118 mad_list);
2119 mad_send_wr->retry = 1;
2120 }
2121 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2122 }
2123
2124 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2125 struct ib_wc *wc)
2126 {
2127 struct ib_mad_list_head *mad_list;
2128 struct ib_mad_qp_info *qp_info;
2129 struct ib_mad_send_wr_private *mad_send_wr;
2130 int ret;
2131
2132 Determine if failure was a send or receive
2133 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2134 qp_info = mad_list->mad_queue->qp_info;
2135 if (mad_list->mad_queue == &qp_info->recv_queue)
2136
2137 * Receive errors indicate that the QP has entered the error
2138 * state - error handling/shutdown code will cleanup
2139
2140 return;
2141
2142
2143 * Send errors will transition the QP to SQE - move
2144 * QP to RTS and repost flushed work requests
2145
2146 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2147 mad_list);
2148 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2149 if (mad_send_wr->retry) {
2150 Repost send
2151 struct ib_send_wr *bad_send_wr;
2152
2153 mad_send_wr->retry = 0;
2154 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2155 &bad_send_wr);
2156 if (ret)
2157 ib_mad_send_done_handler(port_priv, wc);
2158 } else
2159 ib_mad_send_done_handler(port_priv, wc);
2160 } else {
2161 struct ib_qp_attr *attr;
2162
2163 Transition QP to RTS and fail offending send
2164 attr = malloc(sizeof *attr);
2165 if (attr) {
2166 attr->qp_state = IB_QPS_RTS;
2167 attr->cur_qp_state = IB_QPS_SQE;
2168 ret = ib_modify_qp(qp_info->qp, attr,
2169 IB_QP_STATE | IB_QP_CUR_STATE);
2170 free(attr);
2171 if (ret)
2172 printf( "mad_error_handler - "
2173 "ib_modify_qp to RTS : %d\n", ret);
2174 else
2175 mark_sends_for_retry(qp_info);
2176 }
2177 ib_mad_send_done_handler(port_priv, wc);
2178 }
2179 }
2180
2181
2182 * IB MAD completion callback
2183 */
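/*
 * Runs on the port work queue: re-arm completion notification, then drain
 * the CQ.  Only successful receive completions are dispatched in this
 * port; send completions and error handling are still disabled.
 */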
2184static void ib_mad_completion_handler(struct work_struct *work) {
2185	struct ib_mad_port_private *port_priv;
2186	struct ib_wc wc;
2187
2188	port_priv = container_of(work, struct ib_mad_port_private, work);
2189	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2190
2191	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2192		if (wc.status == IB_WC_SUCCESS) {
2193			switch (wc.opcode) {
2194			case IB_WC_SEND:
2195				//ib_mad_send_done_handler(port_priv, &wc);
2196				break;
2197			case IB_WC_RECV:
2198				ib_mad_recv_done_handler(port_priv, &wc);
2199				break;
2200			default:
2201				assert(0);
2202				break;
2203			}
2204		} /*else
2205		 mad_error_handler(port_priv, &wc);*/
2206	}
2207}
2208/*
2209 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2210 {
2211 unsigned long flags;
2212 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2213 struct ib_mad_send_wc mad_send_wc;
2214 struct list_head cancel_list;
2215
2216 INIT_LIST_HEAD(&cancel_list);
2217
2218 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2219 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2220 &mad_agent_priv->send_list, agent_list) {
2221 if (mad_send_wr->status == IB_WC_SUCCESS) {
2222 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2223 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2224 }
2225 }
2226
2227 Empty wait list to prevent receives from finding a request
2228 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2229 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2230
2231 Report all cancelled requests
2232 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2233 mad_send_wc.vendor_err = 0;
2234
2235 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2236 &cancel_list, agent_list) {
2237 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2238 list_del(&mad_send_wr->agent_list);
2239 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2240 &mad_send_wc);
2241 atomic_dec(&mad_agent_priv->refcount);
2242 }
2243 }
2244
2245 static struct ib_mad_send_wr_private*
2246 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2247 struct ib_mad_send_buf *send_buf)
2248 {
2249 struct ib_mad_send_wr_private *mad_send_wr;
2250
2251 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2252 agent_list) {
2253 if (&mad_send_wr->send_buf == send_buf)
2254 return mad_send_wr;
2255 }
2256
2257 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2258 agent_list) {
2259 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2260 &mad_send_wr->send_buf == send_buf)
2261 return mad_send_wr;
2262 }
2263 return NULL;
2264 }
2265
2266 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2267 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2268 {
2269 struct ib_mad_agent_private *mad_agent_priv;
2270 struct ib_mad_send_wr_private *mad_send_wr;
2271 unsigned long flags;
2272 int active;
2273
2274 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2275 agent);
2276 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2277 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2278 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2279 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2280 return -EINVAL;
2281 }
2282
2283 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2284 if (!timeout_ms) {
2285 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2286 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2287 }
2288
2289 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2290 if (active)
2291 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2292 else
2293 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2294
2295 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2296 return 0;
2297 }
2298 EXPORT_SYMBOL(ib_modify_mad);
2299
2300 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2301 struct ib_mad_send_buf *send_buf)
2302 {
2303 ib_modify_mad(mad_agent, send_buf, 0);
2304 }
2305 EXPORT_SYMBOL(ib_cancel_mad);
2306
2307 static void local_completions(struct work_struct *work)
2308 {
2309 struct ib_mad_agent_private *mad_agent_priv;
2310 struct ib_mad_local_private *local;
2311 struct ib_mad_agent_private *recv_mad_agent;
2312 unsigned long flags;
2313 int free_mad;
2314 struct ib_wc wc;
2315 struct ib_mad_send_wc mad_send_wc;
2316
2317 mad_agent_priv =
2318 container_of(work, struct ib_mad_agent_private, local_work);
2319
2320 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2321 while (!list_empty(&mad_agent_priv->local_list)) {
2322 local = list_entry(mad_agent_priv->local_list.next,
2323 struct ib_mad_local_private,
2324 completion_list);
2325 list_del(&local->completion_list);
2326 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2327 free_mad = 0;
2328 if (local->mad_priv) {
2329 recv_mad_agent = local->recv_mad_agent;
2330 if (!recv_mad_agent) {
2331 printf( "No receive MAD agent for local completion\n");
2332 free_mad = 1;
2333 goto local_send_completion;
2334 }
2335
2336
2337 * Defined behavior is to complete response
2338 * before request
2339
2340 build_smp_wc(recv_mad_agent->agent.qp,
2341 (unsigned long) local->mad_send_wr,
2342 be16_to_cpu(IB_LID_PERMISSIVE),
2343 0, recv_mad_agent->agent.port_num, &wc);
2344
2345 local->mad_priv->header.recv_wc.wc = &wc;
2346 local->mad_priv->header.recv_wc.mad_len =
2347 sizeof(struct ib_mad);
2348 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2349 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2350 &local->mad_priv->header.recv_wc.rmpp_list);
2351 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2352 local->mad_priv->header.recv_wc.recv_buf.mad =
2353 &local->mad_priv->mad.mad;
2354 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2355 snoop_recv(recv_mad_agent->qp_info,
2356 &local->mad_priv->header.recv_wc,
2357 IB_MAD_SNOOP_RECVS);
2358 recv_mad_agent->agent.recv_handler(
2359 &recv_mad_agent->agent,
2360 &local->mad_priv->header.recv_wc);
2361 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2362 atomic_dec(&recv_mad_agent->refcount);
2363 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2364 }
2365
2366 local_send_completion:
2367 Complete send
2368 mad_send_wc.status = IB_WC_SUCCESS;
2369 mad_send_wc.vendor_err = 0;
2370 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2371 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2372 snoop_send(mad_agent_priv->qp_info,
2373 &local->mad_send_wr->send_buf,
2374 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2375 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2376 &mad_send_wc);
2377
2378 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2379 atomic_dec(&mad_agent_priv->refcount);
2380 if (free_mad)
2381 kmem_cache_free(ib_mad_cache, local->mad_priv);
2382 free(local);
2383 }
2384 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2385 }
2386
2387 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2388 {
2389 int ret;
2390
2391 if (!mad_send_wr->retries_left)
2392 return -ETIMEDOUT;
2393
2394 mad_send_wr->retries_left--;
2395 mad_send_wr->send_buf.retries++;
2396
2397 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2398
2399 if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2400 ret = ib_retry_rmpp(mad_send_wr);
2401 switch (ret) {
2402 case IB_RMPP_RESULT_UNHANDLED:
2403 ret = ib_send_mad(mad_send_wr);
2404 break;
2405 case IB_RMPP_RESULT_CONSUMED:
2406 ret = 0;
2407 break;
2408 default:
2409 ret = -ECOMM;
2410 break;
2411 }
2412 } else
2413 ret = ib_send_mad(mad_send_wr);
2414
2415 if (!ret) {
2416 mad_send_wr->refcount++;
2417 list_add_tail(&mad_send_wr->agent_list,
2418 &mad_send_wr->mad_agent_priv->send_list);
2419 }
2420 return ret;
2421 }
2422
2423 static void timeout_sends(struct work_struct *work)
2424 {
2425 struct ib_mad_agent_private *mad_agent_priv;
2426 struct ib_mad_send_wr_private *mad_send_wr;
2427 struct ib_mad_send_wc mad_send_wc;
2428 unsigned long flags;
2429
2430 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2431 timeout_work);
2432 mad_send_wc.vendor_err = 0;
2433
2434 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2435 while (!list_empty(&mad_agent_priv->wait_list)) {
2436 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2437 struct ib_mad_send_wr_private,
2438 agent_list);
2439
2440 if (time_after(mad_send_wr->timeout, jiffies)) {
2441 mod_timer(&mad_agent_priv->timeout_timer,
2442 mad_send_wr->timeout);
2443 break;
2444 }
2445
2446 list_del(&mad_send_wr->agent_list);
2447 if (mad_send_wr->status == IB_WC_SUCCESS &&
2448 !retry_send(mad_send_wr))
2449 continue;
2450
2451 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2452
2453 if (mad_send_wr->status == IB_WC_SUCCESS)
2454 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2455 else
2456 mad_send_wc.status = mad_send_wr->status;
2457 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2458 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2459 &mad_send_wc);
2460
2461 atomic_dec(&mad_agent_priv->refcount);
2462 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2463 }
2464 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2465 }
2466 */
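/*
 * Completion event callback registered with ib_create_cq(): defer the
 * actual CQ processing to the port work queue, which runs
 * ib_mad_completion_handler().
 */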
2467static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg) {
2468	struct ib_mad_port_private *port_priv = cq->cq_context;
2469	/*unsigned long flags;*/
2470
	printf("ib_mad_thread_completion_handler\n");
2472
2473	/*spin_lock_irqsave(&ib_mad_port_list_lock, flags);*/
2474	if (!list_empty(&port_priv->port_list))
2475		queue_work(port_priv->wq, &port_priv->work);
2476	/*ib_mad_completion_handler(port_priv);*/
2477	/*spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);*/
2478}
2479/*
2480
2481 * Allocate receive MADs and post receive WRs for them
2482 */
2483static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2484		struct ib_mad_private *mad) {
2485	/*unsigned long flags;*/
2486	int post, ret;
2487	struct ib_mad_private *mad_priv;
2488	struct ib_sge sg_list;
2489	struct ib_recv_wr recv_wr, *bad_recv_wr;
2490	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2491	genpaddr_t t;
2492
2493	/*Initialize common scatter list fields*/
2494	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2495	sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2496
2497	/*Initialize common receive WR fields*/
2498	recv_wr.next = NULL;
2499	recv_wr.sg_list = &sg_list;
2500	recv_wr.num_sge = 1;
2501
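	/* Keep allocating and posting receive buffers until the queue is full
	 * (count reaches max_active) or a post fails; a caller-supplied buffer,
	 * if any, is posted first. */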
2502	do {
2503		/*Allocate and map receive buffer*/
		if (mad) {
			/* Reuse a previously posted buffer; its bus address was
			 * recorded in header.mapping when it was first mapped. */
			mad_priv = mad;
			mad = NULL;
			sg_list.addr = mad_priv->header.mapping;
		} else {
			mad_priv = dma_alloc(sizeof *mad_priv, &t);	//kmem_cache_alloc(ib_mad_cache);
			if (!mad_priv) {
				printf("No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
			sg_list.addr = t + sizeof mad_priv->header;
			/*ib_dma_map_single(qp_info->port_priv->device,
			 &mad_priv->grh, sizeof *mad_priv - sizeof mad_priv->header,
			 DMA_FROM_DEVICE);*/
		}
		mad_priv->header.mapping = sg_list.addr;
2519		recv_wr.wr_id = (unsigned long) &mad_priv->header.mad_list;
2520		mad_priv->header.mad_list.mad_queue = recv_queue;
2521
2522		/*Post receive WR*/
2523		/*spin_lock_irqsave(&recv_queue->lock, flags);*/
2524		post = (++recv_queue->count < recv_queue->max_active);
2525		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2526		/*spin_unlock_irqrestore(&recv_queue->lock, flags);*/
2527		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2528		if (ret) {
2529			/*spin_lock_irqsave(&recv_queue->lock, flags);*/
2530			list_del(&mad_priv->header.mad_list.list);
2531			recv_queue->count--;
2532			/*spin_unlock_irqrestore(&recv_queue->lock, flags);*/
2533			/*TODO: cleanup*/
2534			assert(0);
2535			/*ib_dma_unmap_single(qp_info->port_priv->device,
2536			 mad_priv->header.mapping,
2537			 sizeof *mad_priv - sizeof mad_priv->header,
2538			 DMA_FROM_DEVICE);*/
2539			free(mad_priv);
2540			printf("ib_post_recv failed: %d\n", ret);
2541			break;
2542		}
2543	} while (post);
2544
2545	return ret;
2546}
2547/*
2548
2549 * Return all the posted receive MADs
2550
2551 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2552 {
2553 struct ib_mad_private_header *mad_priv_hdr;
2554 struct ib_mad_private *recv;
2555 struct ib_mad_list_head *mad_list;
2556
2557 if (!qp_info->qp)
2558 return;
2559
2560 while (!list_empty(&qp_info->recv_queue.list)) {
2561
2562 mad_list = list_entry(qp_info->recv_queue.list.next,
2563 struct ib_mad_list_head, list);
2564 mad_priv_hdr = container_of(mad_list,
2565 struct ib_mad_private_header,
2566 mad_list);
2567 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2568 header);
2569
2570 Remove from posted receive MAD list
2571 list_del(&mad_list->list);
2572
2573 ib_dma_unmap_single(qp_info->port_priv->device,
2574 recv->header.mapping,
2575 sizeof(struct ib_mad_private) -
2576 sizeof(struct ib_mad_private_header),
2577 DMA_FROM_DEVICE);
2578 kmem_cache_free(ib_mad_cache, recv);
2579 }
2580
2581 qp_info->recv_queue.count = 0;
2582 }
2583
2584
2585 * Start the port
2586 */
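/*
 * Move both MAD QPs through the INIT -> RTR -> RTS transitions, request
 * CQ notification and fill the receive queues.
 */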
2587static int ib_mad_port_start(struct ib_mad_port_private *port_priv) {
2588	int ret, i;
2589	struct ib_qp_attr *attr;
2590	struct ib_qp *qp;
2591
2592	attr = malloc(sizeof *attr);
2593	if (!attr) {
2594		printf("Couldn't malloc ib_qp_attr\n");
2595		return -ENOMEM;
2596	}
2597
2598	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2599		qp = port_priv->qp_info[i].qp;
2600		if (!qp)
2601			continue;
2602
2603		/** PKey index for QP1 is irrelevant but
2604		 * one is needed for the Reset to Init transition*/
2605
2606		attr->qp_state = IB_QPS_INIT;
2607		attr->pkey_index = 0;
2608		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2609		ret = ib_modify_qp(qp, attr,
2610				IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY);
2611		if (ret) {
2612			printf("Couldn't change QP%d state to "
2613					"INIT: %d\n", i, ret);
2614			goto out;
2615		}
2616
2617		attr->qp_state = IB_QPS_RTR;
2618		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2619		if (ret) {
2620			printf("Couldn't change QP%d state to "
2621					"RTR: %d\n", i, ret);
2622			goto out;
2623		}
2624
2625		attr->qp_state = IB_QPS_RTS;
2626		attr->sq_psn = IB_MAD_SEND_Q_PSN;
2627		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2628		if (ret) {
2629			printf("Couldn't change QP%d state to "
2630					"RTS: %d\n", i, ret);
2631			goto out;
2632		}
2633	}
2634
2635	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2636	if (ret) {
2637		printf("Failed to request completion "
2638				"notification: %d\n", ret);
2639		goto out;
2640	}
2641
2642	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2643		if (!port_priv->qp_info[i].qp)
2644			continue;
2645
2646		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2647		if (ret) {
2648			printf("Couldn't post receive WRs\n");
2649			goto out;
2650		}
2651	}
2652	out: free(attr);
2653	return ret;
2654}
2655
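/*
 * Asynchronous QP event callback: fatal errors on a MAD QP are only
 * reported here; there is no recovery path.
 */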
2656static void qp_event_handler(struct ib_event *event, void *qp_context) {
2657	struct ib_mad_qp_info *qp_info = qp_context;
2658
2659	/*It's worse than that! He's dead, Jim!*/
2660	printf("Fatal error (%d) on MAD QP (%d)\n", event->event,
2661			qp_info->qp->qp_num);
2662}
2663
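/* Reset a send or receive MAD queue to an empty state. */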
2664static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2665		struct ib_mad_queue *mad_queue) {
2666	mad_queue->qp_info = qp_info;
2667	mad_queue->count = 0;
2668	/*spin_lock_init(&mad_queue->lock);*/
2669	INIT_LIST_HEAD(&mad_queue->list);
2670}
2671
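/* Set up the per-QP bookkeeping (send/receive queues, overflow list and
 * snoop state) before the QP itself is created. */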
2672static void init_mad_qp(struct ib_mad_port_private *port_priv,
2673		struct ib_mad_qp_info *qp_info) {
2674	qp_info->port_priv = port_priv;
2675	init_mad_queue(qp_info, &qp_info->send_queue);
2676	init_mad_queue(qp_info, &qp_info->recv_queue);
2677	INIT_LIST_HEAD(&qp_info->overflow_list);
2678	/*spin_lock_init(&qp_info->snoop_lock);*/
2679	qp_info->snoop_table = NULL;
2680	qp_info->snoop_table_size = 0;
2681	/*atomic_set(&qp_info->snoop_count, 0);*/
2682}
2683
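/*
 * Create one of the special QPs (SMI or GSI) for this port.  Both QPs
 * share the port CQ, and the queue depths come from mad_sendq_size and
 * mad_recvq_size.
 */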
2684static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2685		enum ib_qp_type qp_type) {
2686	struct ib_qp_init_attr qp_init_attr;
2687	int ret;
2688
2689	memset(&qp_init_attr, 0, sizeof qp_init_attr);
2690	qp_init_attr.send_cq = qp_info->port_priv->cq;
2691	qp_init_attr.recv_cq = qp_info->port_priv->cq;
2692	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2693	qp_init_attr.cap.max_send_wr = mad_sendq_size;
2694	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
2695	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2696	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2697	qp_init_attr.qp_type = qp_type;
2698	qp_init_attr.port_num = qp_info->port_priv->port_num;
2699	qp_init_attr.qp_context = qp_info;
2700	qp_init_attr.event_handler = qp_event_handler;
2701	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2702	if (IS_ERR(qp_info->qp)) {
2703		printf("Couldn't create ib_mad QP%d\n", get_spl_qp_index(qp_type));
2704		ret = PTR_ERR(qp_info->qp);
2705		goto error;
2706	}
2707	/*Use minimum queue sizes unless the CQ is resized*/
2708	qp_info->send_queue.max_active = mad_sendq_size;
2709	qp_info->recv_queue.max_active = mad_recvq_size;
2710	return 0;
2711
2712	error: return ret;
2713}
2714/*
2715 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2716 {
2717 if (!qp_info->qp)
2718 return;
2719
2720 ib_destroy_qp(qp_info->qp);
2721 free(qp_info->snoop_table);
2722 }
2723
2724
2725 * Open the port
2726 * Create the QP, PD, MR, and CQ if needed
2727 */
2728static int ib_mad_port_open(struct ib_device *device, int port_num) {
2729	int ret, cq_size;
2730	struct ib_mad_port_private *port_priv;
2731	/*unsigned long flags;*/
2732	char name[sizeof "ib_mad123"];
2733	int has_smi;
2734
2735	/*Create new device info*/
2736	port_priv = calloc(1, sizeof *port_priv);
2737	if (!port_priv) {
2738		printf("No memory for ib_mad_port_private\n");
2739		return -ENOMEM;
2740	}
2741
2742	port_priv->device = device;
2743	port_priv->port_num = port_num;
2744	/*spin_lock_init(&port_priv->reg_lock);*/
2745	INIT_LIST_HEAD(&port_priv->agent_list);
2746	init_mad_qp(port_priv, &port_priv->qp_info[0]);
2747	init_mad_qp(port_priv, &port_priv->qp_info[1]);
2748
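	/* A single CQ serves the send and receive queues of both QPs; double
	 * it when the port also has an SMI QP (InfiniBand link layer). */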
2749	cq_size = mad_sendq_size + mad_recvq_size;
2750	has_smi = rdma_port_get_link_layer(device, port_num)
2751			== IB_LINK_LAYER_INFINIBAND;
2752	if (has_smi)
2753		cq_size *= 2;
2754
2755	port_priv->cq = ib_create_cq(port_priv->device,
2756			ib_mad_thread_completion_handler,
2757			NULL, port_priv, cq_size, 0);
2758	if (IS_ERR(port_priv->cq)) {
2759		printf("Couldn't create ib_mad CQ\n");
2760		ret = PTR_ERR(port_priv->cq);
2761		goto error3;
2762	}
2763
2764	port_priv->pd = ib_alloc_pd(device);
2765	if (IS_ERR(port_priv->pd)) {
2766		printf("Couldn't create ib_mad PD\n");
2767		ret = PTR_ERR(port_priv->pd);
2768		goto error4;
2769	}
2770
2771	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2772	if (IS_ERR(port_priv->mr)) {
2773		printf("Couldn't get ib_mad DMA MR\n");
2774		ret = PTR_ERR(port_priv->mr);
2775		goto error5;
2776	}
2777
2778	if (has_smi) {
2779		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2780		if (ret)
2781			goto error6;
2782	}
2783
2784	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2785	if (ret)
2786		goto error7;
2787
2788	snprintf(name, sizeof name, "ib_mad%d", port_num);
2789	port_priv->wq = create_singlethread_workqueue(name);
2790	if (!port_priv->wq) {
2791		ret = -ENOMEM;
2792		goto error8;
2793	}
2794	INIT_WORK(&port_priv->work, ib_mad_completion_handler);
2795
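	/* The global port list is normally initialised by the module init code,
	 * which is compiled out in this port, so initialise it lazily here. */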
2796	if (!ib_mad_port_list.next)
2797		INIT_LIST_HEAD(&ib_mad_port_list);
2798
2799	/*spin_lock_irqsave(&ib_mad_port_list_lock, flags);*/
2800	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2801	/*spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);*/
2802
2803	ret = ib_mad_port_start(port_priv);
2804	if (ret) {
2805		printf("Couldn't start port\n");
2806		goto error9;
2807	}
2808
2809	return 0;
2810
2811	/*TODO: cleanup*/
2812	error9: /*spin_lock_irqsave(&ib_mad_port_list_lock, flags);*/
2813	list_del_init(&port_priv->port_list);
2814	/*spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2815
2816	 destroy_workqueue(port_priv->wq);*/
2817	error8: /*destroy_mad_qp(&port_priv->qp_info[1]);*/
2818	error7: /*destroy_mad_qp(&port_priv->qp_info[0]);*/
2819	error6: /*ib_dereg_mr(port_priv->mr);*/
2820	error5: /*ib_dealloc_pd(port_priv->pd);*/
2821	error4: /*ib_destroy_cq(port_priv->cq);*/
2822	/*cleanup_recv_queue(&port_priv->qp_info[1]);
2823	 cleanup_recv_queue(&port_priv->qp_info[0]);*/
2824	error3: free(port_priv);
2825
2826	return ret;
2827}
2828/*
2829
2830 * Close the port
2831 * If there are no classes using the port, free the port
2832 * resources (CQ, MR, PD, QP) and remove the port's info structure
2833
2834 static int ib_mad_port_close(struct ib_device *device, int port_num)
2835 {
2836 struct ib_mad_port_private *port_priv;
2837 unsigned long flags;
2838
2839 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2840 port_priv = __ib_get_mad_port(device, port_num);
2841 if (port_priv == NULL) {
2842 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2843 printf( "Port %d not found\n", port_num);
2844 return -ENODEV;
2845 }
2846 list_del_init(&port_priv->port_list);
2847 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2848
2849 destroy_workqueue(port_priv->wq);
2850 destroy_mad_qp(&port_priv->qp_info[1]);
2851 destroy_mad_qp(&port_priv->qp_info[0]);
2852 ib_dereg_mr(port_priv->mr);
2853 ib_dealloc_pd(port_priv->pd);
2854 ib_destroy_cq(port_priv->cq);
2855 cleanup_recv_queue(&port_priv->qp_info[1]);
2856 cleanup_recv_queue(&port_priv->qp_info[0]);
2857 XXX: Handle deallocation of MAD registration tables
2858
2859 free(port_priv);
2860
2861 return 0;
2862 }
2863 */
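/*
 * Per-device setup: open a MAD port (CQ, PD, MR, QPs, work queue) and an
 * agent port for every physical port of an InfiniBand device; switches
 * only use port 0.
 */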
2864void ib_mad_init_device(struct ib_device *device) {
2865	int start, end, i;
2866
2867	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
2868		return;
2869
2870	if (device->node_type == RDMA_NODE_IB_SWITCH) {
2871		start = 0;
2872		end = 0;
2873	} else {
2874		start = 1;
2875		end = device->phys_port_cnt;
2876	}
2877
2878	for (i = start; i <= end; i++) {
2879		if (ib_mad_port_open(device, i)) {
2880			printf("Couldn't open %s port %d\n", device->name, i);
2881			goto error;
2882		}
2883		if (ib_agent_port_open(device, i)) {
2884			printf("Couldn't open %s port %d "
2885					"for agents\n", device->name, i);
2886			goto error_agent;
2887		}
2888	}
2889	return;
2890
2891	/*TODO: cleanup*/
	error_agent: /*if (ib_mad_port_close(device, i))
	 printf("Couldn't close %s port %d\n", device->name, i);*/
2894
2895	error: i--;
2896
2897	/*while (i >= start) {
2898	 if (ib_agent_port_close(device, i))
2899	 printf("Couldn't close %s port %d "
2900	 "for agents\n", device->name, i);
2901	 if (ib_mad_port_close(device, i))
2902	 printf("Couldn't close %s port %d\n", device->name, i);
2903	 i--;
2904	 }*/
2905}
2906/*
2907 static void ib_mad_remove_device(struct ib_device *device)
2908 {
2909 int i, num_ports, cur_port;
2910
2911 if (device->node_type == RDMA_NODE_IB_SWITCH) {
2912 num_ports = 1;
2913 cur_port = 0;
2914 } else {
2915 num_ports = device->phys_port_cnt;
2916 cur_port = 1;
2917 }
2918 for (i = 0; i < num_ports; i++, cur_port++) {
2919 if (ib_agent_port_close(device, cur_port))
2920 printf( "Couldn't close %s port %d "
2921 "for agents\n",
2922 device->name, cur_port);
2923 if (ib_mad_port_close(device, cur_port))
2924 printf( "Couldn't close %s port %d\n",
2925 device->name, cur_port);
2926 }
2927 }
2928
2929 static struct ib_client mad_client = {
2930 .name   = "mad",
2931 .add = ib_mad_init_device,
2932 .remove = ib_mad_remove_device
2933 };
2934
2935 static int __init ib_mad_init_module(void)
2936 {
2937 int ret;
2938
2939 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
2940 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
2941
2942 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
2943 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
2944
2945 spin_lock_init(&ib_mad_port_list_lock);
2946
2947 ib_mad_cache = kmem_cache_create("ib_mad",
2948 sizeof(struct ib_mad_private),
2949 0,
2950 SLAB_HWCACHE_ALIGN,
2951 NULL);
2952 if (!ib_mad_cache) {
2953 printf( "Couldn't create ib_mad cache\n");
2954 ret = -ENOMEM;
2955 goto error1;
2956 }
2957
2958 INIT_LIST_HEAD(&ib_mad_port_list);
2959
2960 if (ib_register_client(&mad_client)) {
2961 printf( "Couldn't register ib_mad client\n");
2962 ret = -EINVAL;
2963 goto error2;
2964 }
2965
2966 return 0;
2967
2968 error2:
2969 kmem_cache_destroy(ib_mad_cache);
2970 error1:
2971 return ret;
2972 }
2973
2974 static void __exit ib_mad_cleanup_module(void)
2975 {
2976 ib_unregister_client(&mad_client);
2977 kmem_cache_destroy(ib_mad_cache);
2978 }
2979
2980 module_init(ib_mad_init_module);
2981 module_exit(ib_mad_cleanup_module);
2982
2983 */
2984