1/*
2 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses.  You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 *     Redistribution and use in source and binary forms, with or
12 *     without modification, are permitted provided that the following
13 *     conditions are met:
14 *
15 *      - Redistributions of source code must retain the above
16 *        copyright notice, this list of conditions and the following
17 *        disclaimer.
18 *
19 *      - Redistributions in binary form must reproduce the above
20 *        copyright notice, this list of conditions and the following
21 *        disclaimer in the documentation and/or other materials
22 *        provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#if HAVE_CONFIG_H
35#  include <config.h>
36#endif /* HAVE_CONFIG_H */
37
38#include <stdlib.h>
39#include <stdio.h>
40#include <strings.h>
41#include <pthread.h>
42#include <errno.h>
43#include <netinet/in.h>
44
45#include "mthca.h"
46#include "mthca-abi.h"
47
48int mthca_query_device(struct ibv_context *context, struct ibv_device_attr *attr)
49{
50	struct ibv_query_device cmd;
51	uint64_t raw_fw_ver;
52	unsigned major, minor, sub_minor;
53	int ret;
54
55	ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd, sizeof cmd);
56	if (ret)
57		return ret;
58
59	major     = (raw_fw_ver >> 32) & 0xffff;
60	minor     = (raw_fw_ver >> 16) & 0xffff;
61	sub_minor = raw_fw_ver & 0xffff;
62
63	snprintf(attr->fw_ver, sizeof attr->fw_ver,
64		 "%d.%d.%d", major, minor, sub_minor);
65
66	return 0;
67}
68
69int mthca_query_port(struct ibv_context *context, uint8_t port,
70		     struct ibv_port_attr *attr)
71{
72	struct ibv_query_port cmd;
73
74	return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd);
75}
76
77struct ibv_pd *mthca_alloc_pd(struct ibv_context *context)
78{
79	struct ibv_alloc_pd        cmd;
80	struct mthca_alloc_pd_resp resp;
81	struct mthca_pd           *pd;
82
83	pd = malloc(sizeof *pd);
84	if (!pd)
85		return NULL;
86
87	if (!mthca_is_memfree(context)) {
88		pd->ah_list = NULL;
89		if (pthread_mutex_init(&pd->ah_mutex, NULL)) {
90			free(pd);
91			return NULL;
92		}
93	}
94
95	if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof cmd,
96			     &resp.ibv_resp, sizeof resp)) {
97		free(pd);
98		return NULL;
99	}
100
101	pd->pdn = resp.pdn;
102
103	return &pd->ibv_pd;
104}
105
/*
 * Release a protection domain: deallocate it in the kernel first, and
 * only free the userspace bookkeeping once that succeeded.
 */
int mthca_free_pd(struct ibv_pd *pd)
{
	int ret = ibv_cmd_dealloc_pd(pd);

	if (!ret)
		free(to_mpd(pd));

	return ret;
}
117
/*
 * Common memory-registration helper.
 *
 * Registers [addr, addr + length) with the HCA, exposing it at hca_va,
 * and returns a freshly malloc()ed ibv_mr (caller releases it through
 * mthca_dereg_mr()).  When dma_sync is set, MTHCA_MR_DMASYNC is passed
 * in the driver-private part of the command.  Returns NULL on
 * allocation or command failure.
 */
static struct ibv_mr *__mthca_reg_mr(struct ibv_pd *pd, void *addr,
				     size_t length, uint64_t hca_va,
				     enum ibv_access_flags access,
				     int dma_sync)
{
	struct ibv_mr *mr;
	struct mthca_reg_mr cmd;
	int ret;

	/*
	 * Old kernels just ignore the extra data we pass in with the
	 * reg_mr command structure, so there's no need to add an ABI
	 * version check here (and indeed the kernel ABI was not
	 * incremented due to this change).
	 */
	cmd.mr_attrs = dma_sync ? MTHCA_MR_DMASYNC : 0;
	cmd.reserved = 0;

	mr = malloc(sizeof *mr);
	if (!mr)
		return NULL;

	/* Two libibverbs ABIs exist: newer ones return a response struct. */
#ifdef IBV_CMD_REG_MR_HAS_RESP_PARAMS
	{
		struct ibv_reg_mr_resp resp;

		ret = ibv_cmd_reg_mr(pd, addr, length, hca_va, access, mr,
				     &cmd.ibv_cmd, sizeof cmd, &resp, sizeof resp);
	}
#else
	ret = ibv_cmd_reg_mr(pd, addr, length, hca_va, access, mr,
			     &cmd.ibv_cmd, sizeof cmd);
#endif
	if (ret) {
		free(mr);
		return NULL;
	}

	return mr;
}
158
159struct ibv_mr *mthca_reg_mr(struct ibv_pd *pd, void *addr,
160			    size_t length, enum ibv_access_flags access)
161{
162	return __mthca_reg_mr(pd, addr, length, (uintptr_t) addr, access, 0);
163}
164
/*
 * Deregister a memory region.  The ibv_mr is freed only after the
 * kernel has successfully dropped the registration.
 */
int mthca_dereg_mr(struct ibv_mr *mr)
{
	int ret = ibv_cmd_dereg_mr(mr);

	if (!ret)
		free(mr);

	return ret;
}
176
/*
 * Round a requested CQ entry count up to the size the hardware needs:
 * the smallest power of two STRICTLY greater than cqe (the extra slack
 * slot is why mthca_create_cq() later passes cqe - 1 to the kernel).
 */
static int align_cq_size(int cqe)
{
	int size = 1;

	while (size <= cqe)
		size <<= 1;

	return size;
}
186
/*
 * Create a completion queue with room for at least cqe entries.
 *
 * The CQE ring is allocated and registered in userspace and its lkey
 * handed to the kernel through the driver-private part of the
 * create-CQ command.  On mem-free HCAs two doorbell records (consumer
 * index and arm) are also allocated and their page/index passed along.
 * On any failure, partially acquired resources are released in reverse
 * order via the fall-through err_* labels and NULL is returned.
 */
struct ibv_cq *mthca_create_cq(struct ibv_context *context, int cqe,
			       struct ibv_comp_channel *channel,
			       int comp_vector)
{
	struct mthca_create_cq      cmd;
	struct mthca_create_cq_resp resp;
	struct mthca_cq      	   *cq;
	int                  	    ret;

	/* Sanity check CQ size before proceeding */
	if (cqe > 131072)
		return NULL;

	cq = malloc(sizeof *cq);
	if (!cq)
		return NULL;

	cq->cons_index = 0;

	if (pthread_spin_init(&cq->lock, PTHREAD_PROCESS_PRIVATE))
		goto err;

	/* Power-of-two ring with one spare slot (see align_cq_size). */
	cqe = align_cq_size(cqe);
	if (mthca_alloc_cq_buf(to_mdev(context->device), &cq->buf, cqe))
		goto err;

	/* Register the CQE ring; dma_sync=1 sets MTHCA_MR_DMASYNC. */
	cq->mr = __mthca_reg_mr(to_mctx(context)->pd, cq->buf.buf,
				cqe * MTHCA_CQ_ENTRY_SIZE,
				0, IBV_ACCESS_LOCAL_WRITE, 1);
	if (!cq->mr)
		goto err_buf;

	cq->mr->context = context;

	if (mthca_is_memfree(context)) {
		cq->arm_sn          = 1;
		cq->set_ci_db_index = mthca_alloc_db(to_mctx(context)->db_tab,
						     MTHCA_DB_TYPE_CQ_SET_CI,
						     &cq->set_ci_db);
		if (cq->set_ci_db_index < 0)
			goto err_unreg;

		cq->arm_db_index    = mthca_alloc_db(to_mctx(context)->db_tab,
						     MTHCA_DB_TYPE_CQ_ARM,
						     &cq->arm_db);
		if (cq->arm_db_index < 0)
			goto err_set_db;

		cmd.arm_db_page  = db_align(cq->arm_db);
		cmd.set_db_page  = db_align(cq->set_ci_db);
		cmd.arm_db_index = cq->arm_db_index;
		cmd.set_db_index = cq->set_ci_db_index;
	} else {
		/* Tavor-style HCAs: no userspace doorbell records. */
		cmd.arm_db_page  = cmd.set_db_page  =
		cmd.arm_db_index = cmd.set_db_index = 0;
	}

	cmd.lkey   = cq->mr->lkey;
	cmd.pdn    = to_mpd(to_mctx(context)->pd)->pdn;
	/* cqe - 1: one slot of the ring is reserved as slack. */
	ret = ibv_cmd_create_cq(context, cqe - 1, channel, comp_vector,
				&cq->ibv_cq, &cmd.ibv_cmd, sizeof cmd,
				&resp.ibv_resp, sizeof resp);
	if (ret)
		goto err_arm_db;

	cq->cqn = resp.cqn;

	if (mthca_is_memfree(context)) {
		/* Stamp the doorbell records with the kernel-chosen CQN. */
		mthca_set_db_qn(cq->set_ci_db, MTHCA_DB_TYPE_CQ_SET_CI, cq->cqn);
		mthca_set_db_qn(cq->arm_db,    MTHCA_DB_TYPE_CQ_ARM,    cq->cqn);
	}

	return &cq->ibv_cq;

err_arm_db:
	if (mthca_is_memfree(context))
		mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_ARM,
			      cq->arm_db_index);

err_set_db:
	if (mthca_is_memfree(context))
		mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
			      cq->set_ci_db_index);

err_unreg:
	mthca_dereg_mr(cq->mr);

err_buf:
	mthca_free_buf(&cq->buf);

err:
	free(cq);

	return NULL;
}
282
/*
 * Resize a CQ to hold at least cqe entries.
 *
 * A new ring is allocated and registered while the old one stays live;
 * only after the kernel command succeeds are the unpolled CQEs copied
 * across and the old ring torn down.  The CQ spinlock is held for the
 * whole operation to keep pollers out.  Returns 0 or a positive errno
 * value.
 */
int mthca_resize_cq(struct ibv_cq *ibcq, int cqe)
{
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_resize_cq cmd;
	struct ibv_mr *mr;
	struct mthca_buf buf;
	int old_cqe;
	int ret;

	/* Sanity check CQ size before proceeding */
	if (cqe > 131072)
		return EINVAL;

	pthread_spin_lock(&cq->lock);

	cqe = align_cq_size(cqe);
	/* ibcq->cqe stores the usable count (ring size - 1): no-op resize. */
	if (cqe == ibcq->cqe + 1) {
		ret = 0;
		goto out;
	}

	ret = mthca_alloc_cq_buf(to_mdev(ibcq->context->device), &buf, cqe);
	if (ret)
		goto out;

	mr = __mthca_reg_mr(to_mctx(ibcq->context)->pd, buf.buf,
			    cqe * MTHCA_CQ_ENTRY_SIZE,
			    0, IBV_ACCESS_LOCAL_WRITE, 1);
	if (!mr) {
		mthca_free_buf(&buf);
		ret = ENOMEM;
		goto out;
	}

	mr->context = ibcq->context;

	/* Snapshot the old size before the command updates ibcq->cqe. */
	old_cqe = ibcq->cqe;

	cmd.lkey = mr->lkey;
	/* Two libibverbs ABIs: newer ones return a response struct. */
#ifdef IBV_CMD_RESIZE_CQ_HAS_RESP_PARAMS
	{
		struct ibv_resize_cq_resp resp;
		ret = ibv_cmd_resize_cq(ibcq, cqe - 1, &cmd.ibv_cmd, sizeof cmd,
					&resp, sizeof resp);
	}
#else
	ret = ibv_cmd_resize_cq(ibcq, cqe - 1, &cmd.ibv_cmd, sizeof cmd);
#endif
	if (ret) {
		mthca_dereg_mr(mr);
		mthca_free_buf(&buf);
		goto out;
	}

	/* Move still-pending CQEs from the old ring into the new one. */
	mthca_cq_resize_copy_cqes(cq, buf.buf, old_cqe);

	mthca_dereg_mr(cq->mr);
	mthca_free_buf(&cq->buf);

	cq->buf = buf;
	cq->mr  = mr;

out:
	pthread_spin_unlock(&cq->lock);
	return ret;
}
349
350int mthca_destroy_cq(struct ibv_cq *cq)
351{
352	int ret;
353
354	ret = ibv_cmd_destroy_cq(cq);
355	if (ret)
356		return ret;
357
358	if (mthca_is_memfree(cq->context)) {
359		mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
360			      to_mcq(cq)->set_ci_db_index);
361		mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_ARM,
362			      to_mcq(cq)->arm_db_index);
363	}
364
365	mthca_dereg_mr(to_mcq(cq)->mr);
366	mthca_free_buf(&to_mcq(cq)->buf);
367	free(to_mcq(cq));
368
369	return 0;
370}
371
/*
 * Compute the allocated size for a work queue of "size" entries plus
 * "spare" slack slots.  Mem-free HCAs require a power of two; others
 * take the exact count.
 */
static int align_queue_size(struct ibv_context *context, int size, int spare)
{
	int nent;

	/*
	 * If someone asks for a 0-sized queue, presumably they're not
	 * going to use it.  So don't mess with their size.
	 */
	if (size == 0)
		return 0;

	if (!mthca_is_memfree(context))
		return size + spare;

	/* Round size + spare up to the next power of two (at least 1). */
	for (nent = 1; nent < size + spare; nent <<= 1)
		;

	return nent;
}
391
/*
 * Create a shared receive queue.
 *
 * The WQE buffer is allocated and registered in userspace; its lkey
 * (and, on mem-free HCAs, the SRQ doorbell record) is passed to the
 * kernel in the driver-private part of the command.  On failure all
 * partially acquired resources are released in reverse order through
 * the fall-through err_* labels and NULL is returned.
 */
struct ibv_srq *mthca_create_srq(struct ibv_pd *pd,
				 struct ibv_srq_init_attr *attr)
{
	struct mthca_create_srq      cmd;
	struct mthca_create_srq_resp resp;
	struct mthca_srq            *srq;
	int                          ret;

	/* Sanity check SRQ size before proceeding */
	if (attr->attr.max_wr > 1 << 16 || attr->attr.max_sge > 64)
		return NULL;

	srq = malloc(sizeof *srq);
	if (!srq)
		return NULL;

	if (pthread_spin_init(&srq->lock, PTHREAD_PROCESS_PRIVATE))
		goto err;

	/* One spare WQE slot, rounded as the HCA family requires. */
	srq->max     = align_queue_size(pd->context, attr->attr.max_wr, 1);
	srq->max_gs  = attr->attr.max_sge;
	srq->counter = 0;

	/* Allocates srq->buf and srq->wrid (freed under err_free). */
	if (mthca_alloc_srq_buf(pd, &attr->attr, srq))
		goto err;

	srq->mr = __mthca_reg_mr(pd, srq->buf.buf, srq->buf_size, 0, 0, 0);
	if (!srq->mr)
		goto err_free;

	srq->mr->context = pd->context;

	if (mthca_is_memfree(pd->context)) {
		srq->db_index = mthca_alloc_db(to_mctx(pd->context)->db_tab,
					       MTHCA_DB_TYPE_SRQ, &srq->db);
		if (srq->db_index < 0)
			goto err_unreg;

		cmd.db_page  = db_align(srq->db);
		cmd.db_index = srq->db_index;
	} else {
		cmd.db_page  = cmd.db_index = 0;
	}

	cmd.lkey = srq->mr->lkey;

	ret = ibv_cmd_create_srq(pd, &srq->ibv_srq, attr,
				 &cmd.ibv_cmd, sizeof cmd,
				 &resp.ibv_resp, sizeof resp);
	if (ret)
		goto err_db;

	srq->srqn = resp.srqn;

	/* Stamp the doorbell record with the kernel-chosen SRQN. */
	if (mthca_is_memfree(pd->context))
		mthca_set_db_qn(srq->db, MTHCA_DB_TYPE_SRQ, srq->srqn);

	return &srq->ibv_srq;

err_db:
	if (mthca_is_memfree(pd->context))
		mthca_free_db(to_mctx(pd->context)->db_tab, MTHCA_DB_TYPE_SRQ,
			      srq->db_index);

err_unreg:
	mthca_dereg_mr(srq->mr);

err_free:
	free(srq->wrid);
	mthca_free_buf(&srq->buf);

err:
	free(srq);

	return NULL;
}
468
469int mthca_modify_srq(struct ibv_srq *srq,
470		     struct ibv_srq_attr *attr,
471		     enum ibv_srq_attr_mask attr_mask)
472{
473	struct ibv_modify_srq cmd;
474
475	return ibv_cmd_modify_srq(srq, attr, attr_mask, &cmd, sizeof cmd);
476}
477
478int mthca_query_srq(struct ibv_srq *srq,
479		    struct ibv_srq_attr *attr)
480{
481	struct ibv_query_srq cmd;
482
483	return ibv_cmd_query_srq(srq, attr, &cmd, sizeof cmd);
484}
485
486int mthca_destroy_srq(struct ibv_srq *srq)
487{
488	int ret;
489
490	ret = ibv_cmd_destroy_srq(srq);
491	if (ret)
492		return ret;
493
494	if (mthca_is_memfree(srq->context))
495		mthca_free_db(to_mctx(srq->context)->db_tab, MTHCA_DB_TYPE_SRQ,
496			      to_msrq(srq)->db_index);
497
498	mthca_dereg_mr(to_msrq(srq)->mr);
499
500	mthca_free_buf(&to_msrq(srq)->buf);
501	free(to_msrq(srq)->wrid);
502	free(to_msrq(srq));
503
504	return 0;
505}
506
/*
 * Create a queue pair.
 *
 * Send and receive rings live in one userspace buffer that is
 * registered and whose lkey (plus, on mem-free HCAs, the SQ/RQ
 * doorbell records) is passed to the kernel.  The context-wide
 * qp_table_mutex is held across the create command and mthca_store_qp()
 * so the QPN-to-QP mapping is published atomically with respect to
 * other create/destroy calls.  Returns NULL on failure, unwinding via
 * the fall-through err_* labels.
 */
struct ibv_qp *mthca_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr)
{
	struct mthca_create_qp    cmd;
	struct ibv_create_qp_resp resp;
	struct mthca_qp          *qp;
	int                       ret;

	/* Sanity check QP size before proceeding */
	if (attr->cap.max_send_wr     > 65536 ||
	    attr->cap.max_recv_wr     > 65536 ||
	    attr->cap.max_send_sge    > 64    ||
	    attr->cap.max_recv_sge    > 64    ||
	    attr->cap.max_inline_data > 1024)
		return NULL;

	qp = malloc(sizeof *qp);
	if (!qp)
		return NULL;

	/* Round ring sizes as the HCA family requires (no spare slots). */
	qp->sq.max = align_queue_size(pd->context, attr->cap.max_send_wr, 0);
	qp->rq.max = align_queue_size(pd->context, attr->cap.max_recv_wr, 0);

	/* Allocates qp->buf and qp->wrid (freed under err_free). */
	if (mthca_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp))
		goto err;

	mthca_init_qp_indices(qp);

	if (pthread_spin_init(&qp->sq.lock, PTHREAD_PROCESS_PRIVATE) ||
	    pthread_spin_init(&qp->rq.lock, PTHREAD_PROCESS_PRIVATE))
		goto err_free;

	qp->mr = __mthca_reg_mr(pd, qp->buf.buf, qp->buf_size, 0, 0, 0);
	if (!qp->mr)
		goto err_free;

	qp->mr->context = pd->context;

	cmd.lkey     = qp->mr->lkey;
	cmd.reserved = 0;

	if (mthca_is_memfree(pd->context)) {
		qp->sq.db_index = mthca_alloc_db(to_mctx(pd->context)->db_tab,
						 MTHCA_DB_TYPE_SQ,
						 &qp->sq.db);
		if (qp->sq.db_index < 0)
			goto err_unreg;

		qp->rq.db_index = mthca_alloc_db(to_mctx(pd->context)->db_tab,
						 MTHCA_DB_TYPE_RQ,
						 &qp->rq.db);
		if (qp->rq.db_index < 0)
			goto err_sq_db;

		cmd.sq_db_page  = db_align(qp->sq.db);
		cmd.rq_db_page  = db_align(qp->rq.db);
		cmd.sq_db_index = qp->sq.db_index;
		cmd.rq_db_index = qp->rq.db_index;
	} else {
		/* Tavor-style HCAs: no userspace doorbell records. */
		cmd.sq_db_page  = cmd.rq_db_page  =
		cmd.sq_db_index = cmd.rq_db_index = 0;
	}

	pthread_mutex_lock(&to_mctx(pd->context)->qp_table_mutex);
	ret = ibv_cmd_create_qp(pd, &qp->ibv_qp, attr, &cmd.ibv_cmd, sizeof cmd,
				&resp, sizeof resp);
	if (ret)
		goto err_rq_db;

	if (mthca_is_memfree(pd->context)) {
		/* Stamp the doorbell records with the kernel-chosen QPN. */
		mthca_set_db_qn(qp->sq.db, MTHCA_DB_TYPE_SQ, qp->ibv_qp.qp_num);
		mthca_set_db_qn(qp->rq.db, MTHCA_DB_TYPE_RQ, qp->ibv_qp.qp_num);
	}

	ret = mthca_store_qp(to_mctx(pd->context), qp->ibv_qp.qp_num, qp);
	if (ret)
		goto err_destroy;
	pthread_mutex_unlock(&to_mctx(pd->context)->qp_table_mutex);

	/* Report back the requested capabilities, not the rounded ones. */
	qp->sq.max 	    = attr->cap.max_send_wr;
	qp->rq.max 	    = attr->cap.max_recv_wr;
	qp->sq.max_gs 	    = attr->cap.max_send_sge;
	qp->rq.max_gs 	    = attr->cap.max_recv_sge;
	qp->max_inline_data = attr->cap.max_inline_data;

	return &qp->ibv_qp;

err_destroy:
	ibv_cmd_destroy_qp(&qp->ibv_qp);

err_rq_db:
	/* The qp_table_mutex is still held when we arrive here. */
	pthread_mutex_unlock(&to_mctx(pd->context)->qp_table_mutex);
	if (mthca_is_memfree(pd->context))
		mthca_free_db(to_mctx(pd->context)->db_tab, MTHCA_DB_TYPE_RQ,
			      qp->rq.db_index);

err_sq_db:
	if (mthca_is_memfree(pd->context))
		mthca_free_db(to_mctx(pd->context)->db_tab, MTHCA_DB_TYPE_SQ,
			      qp->sq.db_index);

err_unreg:
	mthca_dereg_mr(qp->mr);

err_free:
	free(qp->wrid);
	mthca_free_buf(&qp->buf);

err:
	free(qp);

	return NULL;
}
619
620int mthca_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
621		   enum ibv_qp_attr_mask attr_mask,
622		   struct ibv_qp_init_attr *init_attr)
623{
624	struct ibv_query_qp cmd;
625
626	return ibv_cmd_query_qp(qp, attr, attr_mask, init_attr, &cmd, sizeof cmd);
627}
628
629int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
630		    enum ibv_qp_attr_mask attr_mask)
631{
632	struct ibv_modify_qp cmd;
633	int ret;
634
635	ret = ibv_cmd_modify_qp(qp, attr, attr_mask, &cmd, sizeof cmd);
636
637	if (!ret		       &&
638	    (attr_mask & IBV_QP_STATE) &&
639	    attr->qp_state == IBV_QPS_RESET) {
640		mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
641			       qp->srq ? to_msrq(qp->srq) : NULL);
642		if (qp->send_cq != qp->recv_cq)
643			mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);
644
645		mthca_init_qp_indices(to_mqp(qp));
646
647		if (mthca_is_memfree(qp->context)) {
648			*to_mqp(qp)->sq.db = 0;
649			*to_mqp(qp)->rq.db = 0;
650		}
651	}
652
653	return ret;
654}
655
656static void mthca_lock_cqs(struct ibv_qp *qp)
657{
658	struct mthca_cq *send_cq = to_mcq(qp->send_cq);
659	struct mthca_cq *recv_cq = to_mcq(qp->recv_cq);
660
661	if (send_cq == recv_cq)
662		pthread_spin_lock(&send_cq->lock);
663	else if (send_cq->cqn < recv_cq->cqn) {
664		pthread_spin_lock(&send_cq->lock);
665		pthread_spin_lock(&recv_cq->lock);
666	} else {
667		pthread_spin_lock(&recv_cq->lock);
668		pthread_spin_lock(&send_cq->lock);
669	}
670}
671
672static void mthca_unlock_cqs(struct ibv_qp *qp)
673{
674	struct mthca_cq *send_cq = to_mcq(qp->send_cq);
675	struct mthca_cq *recv_cq = to_mcq(qp->recv_cq);
676
677	if (send_cq == recv_cq)
678		pthread_spin_unlock(&send_cq->lock);
679	else if (send_cq->cqn < recv_cq->cqn) {
680		pthread_spin_unlock(&recv_cq->lock);
681		pthread_spin_unlock(&send_cq->lock);
682	} else {
683		pthread_spin_unlock(&send_cq->lock);
684		pthread_spin_unlock(&recv_cq->lock);
685	}
686}
687
/*
 * Destroy a QP.
 *
 * The kernel command runs first, under the context-wide
 * qp_table_mutex.  Then, with both CQ spinlocks held (in canonical
 * order via mthca_lock_cqs), the CQs are scrubbed of this QP's
 * completions and the QPN mapping is removed — the lock ordering here
 * (table mutex outside, CQ locks inside) must match the rest of the
 * file.  Finally the userspace resources are released.
 */
int mthca_destroy_qp(struct ibv_qp *qp)
{
	int ret;

	pthread_mutex_lock(&to_mctx(qp->context)->qp_table_mutex);
	ret = ibv_cmd_destroy_qp(qp);
	if (ret) {
		pthread_mutex_unlock(&to_mctx(qp->context)->qp_table_mutex);
		return ret;
	}

	mthca_lock_cqs(qp);

	/* Drop completions for this QP, sparing SRQ-owned WQEs. */
	__mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
			 qp->srq ? to_msrq(qp->srq) : NULL);
	if (qp->send_cq != qp->recv_cq)
		__mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);

	mthca_clear_qp(to_mctx(qp->context), qp->qp_num);

	mthca_unlock_cqs(qp);
	pthread_mutex_unlock(&to_mctx(qp->context)->qp_table_mutex);

	if (mthca_is_memfree(qp->context)) {
		mthca_free_db(to_mctx(qp->context)->db_tab, MTHCA_DB_TYPE_RQ,
			      to_mqp(qp)->rq.db_index);
		mthca_free_db(to_mctx(qp->context)->db_tab, MTHCA_DB_TYPE_SQ,
			      to_mqp(qp)->sq.db_index);
	}

	mthca_dereg_mr(to_mqp(qp)->mr);
	mthca_free_buf(&to_mqp(qp)->buf);
	free(to_mqp(qp)->wrid);
	free(to_mqp(qp));

	return 0;
}
725
726struct ibv_ah *mthca_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
727{
728	struct mthca_ah *ah;
729
730	ah = malloc(sizeof *ah);
731	if (!ah)
732		return NULL;
733
734	if (mthca_alloc_av(to_mpd(pd), attr, ah)) {
735		free(ah);
736		return NULL;
737	}
738
739	return &ah->ibv_ah;
740}
741
/*
 * Destroy an address handle: release its address vector, then the
 * handle itself.  Cannot fail.
 */
int mthca_destroy_ah(struct ibv_ah *ah)
{
	struct mthca_ah *mah = to_mah(ah);

	mthca_free_av(mah);
	free(mah);

	return 0;
}
749
/*
 * Attach a QP to a multicast group.  Pure pass-through to the generic
 * libibverbs command path; the kernel does all the work.
 */
int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
{
	return ibv_cmd_attach_mcast(qp, gid, lid);
}
754
/*
 * Detach a QP from a multicast group.  Pure pass-through to the
 * generic libibverbs command path; the kernel does all the work.
 */
int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
{
	return ibv_cmd_detach_mcast(qp, gid, lid);
}
759