/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

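/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 *
 * Returns -1 if @rate is not a recognized IB_RATE_* value.
 */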
int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

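/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 *
 * Returns IB_RATE_PORT_CURRENT for any multiple without a defined rate.
 */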
enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

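/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs,
 * shared receive queues, address handles, memory regions, and memory
 * windows.
 */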
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

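/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */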
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

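/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   received work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an
 *   address handle for replying to the message.
 */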
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

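/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */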
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

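/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_init_attr->attr.max_wr and srq_init_attr->attr.max_sge are read
 * to determine the requested size of the SRQ, and set to the actual
 * values allocated on return.
 */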
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->xrc_cq	   = NULL;
		srq->xrcd	   = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

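/**
 * ib_create_xrc_srq - Creates an XRC SRQ associated with the specified
 *   protection domain, XRC domain, and completion queue.
 * @pd: The protection domain associated with the SRQ.
 * @xrc_cq: The CQ to be associated with the XRC SRQ.
 * @xrcd: The XRC domain to be associated with the XRC SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.
 */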
struct ib_srq *ib_create_xrc_srq(struct ib_pd *pd,
				 struct ib_cq *xrc_cq,
				 struct ib_xrcd *xrcd,
				 struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_xrc_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_xrc_srq(pd, xrc_cq, xrcd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->xrc_cq	   = xrc_cq;
		srq->xrcd	   = xrcd;
		atomic_inc(&pd->usecnt);
		atomic_inc(&xrcd->usecnt);
		atomic_inc(&xrc_cq->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_xrc_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	struct ib_cq *xrc_cq;
	struct ib_xrcd *xrcd;
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	xrc_cq = srq->xrc_cq;
	xrcd = srq->xrcd;

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (xrc_cq)
			atomic_dec(&xrc_cq->usecnt);
		if (xrcd)
			atomic_dec(&xrcd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

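/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */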
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp;

	qp = pd->device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device	  = pd->device;
		qp->pd		  = pd;
		qp->send_cq	  = qp_init_attr->send_cq;
		qp->recv_cq	  = qp_init_attr->recv_cq;
		qp->srq		  = qp_init_attr->srq;
		qp->uobject	  = NULL;
		qp->event_handler = qp_init_attr->event_handler;
		qp->qp_context	  = qp_init_attr->qp_context;
		qp->qp_type	  = qp_init_attr->qp_type;
		qp->xrcd	  = qp->qp_type == IB_QPT_XRC ?
			qp_init_attr->xrc_domain : NULL;
		atomic_inc(&pd->usecnt);
		atomic_inc(&qp_init_attr->send_cq->usecnt);
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
		if (qp_init_attr->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
		if (qp->qp_type == IB_QPT_XRC)
			atomic_inc(&qp->xrcd->usecnt);
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETH + 1];
	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETH + 1];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_RAW_ETH] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC] = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC] = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC] = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC] = (IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_SQ_PSN			|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC] = (IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_SQ_PSN			|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC] = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE		|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC] = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE		|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC] = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_MAX_QP_RD_ATOMIC		|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC] = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_MAX_QP_RD_ATOMIC		|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

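/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */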
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd;
	enum ib_qp_type	qp_type = qp->qp_type;
	int ret;

	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;
	xrcd = qp->xrcd;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&scq->usecnt);
		atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
		if (qp_type == IB_QPT_XRC)
			atomic_dec(&xrcd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

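/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked in response
 *   to a completion event.
 * @event_handler: A user-specified callback that is invoked in response
 *   to an asynchronous event on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */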
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

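/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined in ib_verbs.h
 * must be used to create/destroy addresses used with the Lkey or Rkey
 * returned by ib_get_dma_mr().
 */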
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;

	if (!pd->device->reg_phys_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

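/**
 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
 *   IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 *   used with fast register work requests for this MR.
 */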
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_fast_reg_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_mr);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		page_list->device = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);

/* Memory windows */

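/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */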
struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

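/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */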
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

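/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be a UD QP
 *   (or, in this tree, a raw Ethernet QP).
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.
 */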
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->attach_mcast)
		return -ENOSYS;

	switch (rdma_node_get_transport(qp->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (qp->qp_type == IB_QPT_RAW_ETH) {
			/* In raw Ethernet MGIDs the 63 MSBs must be 0 */
			if (gid->global.subnet_prefix & cpu_to_be64(~1ULL))
				return -EINVAL;
		} else if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
			return -EINVAL;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (qp->qp_type != IB_QPT_RAW_ETH)
			return -EINVAL;
		break;
	}
	return qp->device->attach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->detach_mcast)
		return -ENOSYS;

	switch (rdma_node_get_transport(qp->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (qp->qp_type == IB_QPT_RAW_ETH) {
			/* In raw Ethernet MGIDs the 63 MSBs must be 0 */
			if (gid->global.subnet_prefix & cpu_to_be64(~1ULL))
				return -EINVAL;
		} else if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
			return -EINVAL;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (qp->qp_type != IB_QPT_RAW_ETH)
			return -EINVAL;
		break;
	}
	return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);

/* XRC domains */

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

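/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */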
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device  = device;
		xrcd->inode   = NULL;
		xrcd->uobject = NULL;
		atomic_set(&xrcd->usecnt, 0);
	}
	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);