/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c,v 1.1.1.1 2007/10/11 23:31:50 Exp $
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
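
/*
 * Illustrative sketch (not part of the original file): the two helpers
 * above are inverses for every rate they know about, expressing a rate
 * as a multiple of the 2.5 Gbps base link speed.  The variable names
 * below are hypothetical.
 *
 *	int mult = ib_rate_to_mult(IB_RATE_10_GBPS);	// 4 (4 x 2.5 Gbps)
 *	enum ib_rate half = mult_to_ib_rate(mult / 2);	// IB_RATE_5_GBPS
 */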

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);
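
/*
 * Illustrative sketch (not part of the original file): a kernel
 * consumer typically allocates one PD per device at startup and frees
 * it only after every object created against it is gone, since
 * ib_dealloc_pd() returns -EBUSY while pd->usecnt is nonzero.  The
 * function name here is hypothetical.
 *
 *	static struct ib_pd *example_setup_pd(struct ib_device *device)
 *	{
 *		struct ib_pd *pd = ib_alloc_pd(device);
 *
 *		if (IS_ERR(pd))
 *			return pd;
 *		// ... create AHs, QPs, CQs, MRs against pd ...
 *		return pd;
 *	}
 */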

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
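
/*
 * Illustrative sketch (not part of the original file): a UD service
 * replying to a received datagram can build the return address handle
 * straight from the completion and the GRH that the HCA scatters into
 * the first 40 bytes of the receive buffer (valid when IB_WC_GRH is
 * set in wc->wc_flags).  The buffer and variable names are
 * hypothetical.
 *
 *	struct ib_ah *reply_ah;
 *
 *	reply_ah = ib_create_ah_from_wc(pd, wc,
 *					(struct ib_grh *) recv_buf,
 *					port_num);
 *	if (IS_ERR(reply_ah))
 *		return PTR_ERR(reply_ah);
 *	// ... post the reply, then ib_destroy_ah(reply_ah) ...
 */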

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device        = pd->device;
		srq->pd            = pd;
		srq->uobject       = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;

	ret = srq->device->destroy_srq(srq);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);
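
/*
 * Illustrative sketch (not part of the original file): creating an SRQ
 * that several QPs can then share via qp_init_attr->srq.  The handler,
 * context, and sizes here are arbitrary placeholders.
 *
 *	struct ib_srq_init_attr srq_init_attr = {
 *		.event_handler = my_srq_event_handler,	// hypothetical
 *		.srq_context   = my_context,		// hypothetical
 *		.attr = {
 *			.max_wr  = 256,
 *			.max_sge = 1,
 *		},
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_init_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */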

/* Queue pairs */

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp;

	qp = pd->device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device        = pd->device;
		qp->pd            = pd;
		qp->send_cq       = qp_init_attr->send_cq;
		qp->recv_cq       = qp_init_attr->recv_cq;
		qp->srq           = qp_init_attr->srq;
		qp->uobject       = NULL;
		qp->event_handler = qp_init_attr->event_handler;
		qp->qp_context    = qp_init_attr->qp_context;
		qp->qp_type       = qp_init_attr->qp_type;
		atomic_inc(&pd->usecnt);
		atomic_inc(&qp_init_attr->send_cq->usecnt);
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
		if (qp_init_attr->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
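
/*
 * Illustrative sketch (not part of the original file): filling in a
 * minimal ib_qp_init_attr for an RC QP.  The CQs, handler, and queue
 * sizes are placeholders; .srq may be left NULL as here.
 *
 *	struct ib_qp_init_attr qp_init_attr = {
 *		.event_handler = my_qp_event_handler,	// hypothetical
 *		.send_cq       = send_cq,
 *		.recv_cq       = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type   = IB_SIGNAL_REQ_WR,
 *		.qp_type       = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &qp_init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */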

static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETY + 1];
	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETY + 1];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_SQ_PSN			|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE		|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_MAX_QP_RD_ATOMIC		|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
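
/*
 * Illustrative sketch (not part of the original file): a driver's
 * modify_qp method can use ib_modify_qp_is_ok() to reject transitions
 * and masks the table above does not allow.  For RESET -> INIT on an
 * RC QP the required mask is exactly PKEY_INDEX | PORT | ACCESS_FLAGS,
 * so this check passes:
 *
 *	if (!ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC,
 *				IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				IB_QP_PORT | IB_QP_ACCESS_FLAGS))
 *		return -EINVAL;
 */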

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
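
/*
 * Illustrative sketch (not part of the original file): driving a newly
 * created RC QP from RESET to INIT.  The pkey index, port, and access
 * flags are placeholders.
 *
 *	struct ib_qp_attr qp_attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_modify_qp(qp, &qp_attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */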

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&scq->usecnt);
		atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);
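
/*
 * Illustrative sketch (not part of the original file): creating a CQ
 * and draining it from the completion handler via the ib_poll_cq() and
 * ib_req_notify_cq() wrappers in <rdma/ib_verbs.h>.  Handler and
 * context names are hypothetical; a production consumer would poll
 * again after rearming to close the notification race.
 *
 *	static void my_comp_handler(struct ib_cq *cq, void *cq_context)
 *	{
 *		struct ib_wc wc;
 *
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			; // ... handle wc.status / wc.wr_id ...
 *		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	}
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_context,
 *			  128, 0);
 */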

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);
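
/*
 * Illustrative sketch (not part of the original file): most kernel
 * consumers only need the DMA MR, which covers all of DMA-able memory,
 * and place its lkey in their scatter/gather entries.
 *
 *	struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	// sge.lkey = mr->lkey; ...
 */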

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);
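
/*
 * Illustrative sketch (not part of the original file): registering a
 * single physically contiguous buffer with ib_reg_phys_mr().  The DMA
 * address is a placeholder.
 *
 *	struct ib_phys_buf phys_buf = {
 *		.addr = buf_dma_addr,	// hypothetical DMA address
 *		.size = PAGE_SIZE,
 *	};
 *	u64 iova = buf_dma_addr;
 *	struct ib_mr *mr = ib_reg_phys_mr(pd, &phys_buf, 1,
 *					  IB_ACCESS_LOCAL_WRITE |
 *					  IB_ACCESS_REMOTE_READ,
 *					  &iova);
 */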

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);
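
/*
 * Illustrative sketch (not part of the original file): a memory window
 * is bound to a range of an existing MR through a QP, using the
 * ib_bind_mw() wrapper in <rdma/ib_verbs.h>.  All values below are
 * placeholders.
 *
 *	struct ib_mw_bind mw_bind = {
 *		.wr_id           = MY_BIND_WRID,	// hypothetical
 *		.mr              = mr,
 *		.addr            = buf_dma_addr,	// hypothetical
 *		.length          = PAGE_SIZE,
 *		.mw_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_bind_mw(qp, mw, &mw_bind);
 */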

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
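
/*
 * Illustrative sketch (not part of the original file): mapping a page
 * list into an FMR with the ib_map_phys_fmr() wrapper in
 * <rdma/ib_verbs.h>, then batching the unmap through the FMR's list
 * member as ib_unmap_fmr() above expects.  The page array is a
 * placeholder.
 *
 *	u64 page_list[1] = { buf_dma_addr };	// hypothetical
 *	LIST_HEAD(fmr_list);
 *	int ret;
 *
 *	ret = ib_map_phys_fmr(fmr, page_list, 1, buf_dma_addr);
 *	// ... use fmr->lkey / fmr->rkey ...
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ret = ib_unmap_fmr(&fmr_list);
 */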

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->attach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);

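/*
 * Illustrative sketch (not part of the original file): attaching a UD
 * QP to a multicast group.  The MGID (which must begin with 0xff, as
 * checked above) and MLID would come from the SA multicast join reply;
 * here they are placeholders.
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	// ... receive multicast datagrams on qp ...
 *	ret = ib_detach_mcast(qp, &mgid, mlid);
 */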