/*
 * Copyright (c) 2006-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if HAVE_CONFIG_H
#  include <config.h>
#endif				/* HAVE_CONFIG_H */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <sys/mman.h>
#include <netinet/in.h>
#include <inttypes.h>
#include <assert.h>

#include "libcxgb4.h"
#include "cxgb4-abi.h"

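/*
 * Mask a mapped doorbell/GTS pointer back down to its page boundary so that
 * munmap() is given the address mmap() originally returned; the pointers are
 * advanced into the page elsewhere to reach specific registers.
 */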
#define MASKED(x) (void *)((unsigned long)(x) & c4iw_page_mask)

int c4iw_query_device(struct ibv_context *context, struct ibv_device_attr *attr)
{
	struct ibv_query_device cmd;
	uint64_t raw_fw_ver;
	unsigned major, minor, sub_minor;
	int ret;

	ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd,
				   sizeof cmd);
	if (ret)
		return ret;

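	/* The kernel packs the firmware revision into raw_fw_ver as three 16-bit fields. */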
	major = (raw_fw_ver >> 32) & 0xffff;
	minor = (raw_fw_ver >> 16) & 0xffff;
	sub_minor = raw_fw_ver & 0xffff;

	snprintf(attr->fw_ver, sizeof attr->fw_ver,
		 "%d.%d.%d", major, minor, sub_minor);

	return 0;
}

int c4iw_query_port(struct ibv_context *context, uint8_t port,
		    struct ibv_port_attr *attr)
{
	struct ibv_query_port cmd;

	return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd);
}

struct ibv_pd *c4iw_alloc_pd(struct ibv_context *context)
{
	struct ibv_alloc_pd cmd;
	struct c4iw_alloc_pd_resp resp;
	struct c4iw_pd *pd;

	pd = malloc(sizeof *pd);
	if (!pd)
		return NULL;

	if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof cmd,
			     &resp.ibv_resp, sizeof resp)) {
		free(pd);
		return NULL;
	}

	return &pd->ibv_pd;
}

int c4iw_free_pd(struct ibv_pd *pd)
{
	int ret;

	ret = ibv_cmd_dealloc_pd(pd);
	if (ret)
		return ret;

	free(pd);
	return 0;
}

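/*
 * Register a memory region with the kernel and record it in the device's
 * MR table, indexed by the stag/mmid derived from the returned lkey.
 */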
static struct ibv_mr *__c4iw_reg_mr(struct ibv_pd *pd, void *addr,
				    size_t length, uint64_t hca_va,
				    int access)
{
	struct c4iw_mr *mhp;
	struct ibv_reg_mr cmd;
	struct ibv_reg_mr_resp resp;
	struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);

	mhp = malloc(sizeof *mhp);
	if (!mhp)
		return NULL;

	if (ibv_cmd_reg_mr(pd, addr, length, hca_va,
			   access, &mhp->ibv_mr, &cmd, sizeof cmd,
			   &resp, sizeof resp)) {
		free(mhp);
		return NULL;
	}

	mhp->va_fbo = hca_va;
	mhp->len = length;

	PDBG("%s stag 0x%x va_fbo 0x%" PRIx64 " len %d\n",
	     __func__, mhp->ibv_mr.rkey, mhp->va_fbo, mhp->len);

	pthread_spin_lock(&dev->lock);
	dev->mmid2ptr[c4iw_mmid(mhp->ibv_mr.lkey)] = mhp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(mr);
	return &mhp->ibv_mr;
}

struct ibv_mr *c4iw_reg_mr(struct ibv_pd *pd, void *addr,
			   size_t length, int access)
{
	PDBG("%s addr %p length %zu\n", __func__, addr, length);
	return __c4iw_reg_mr(pd, addr, length, (uintptr_t) addr, access);
}

int c4iw_dereg_mr(struct ibv_mr *mr)
{
	int ret;
	struct c4iw_dev *dev = to_c4iw_dev(mr->pd->context->device);

	ret = ibv_cmd_dereg_mr(mr);
	if (ret)
		return ret;

	pthread_spin_lock(&dev->lock);
	dev->mmid2ptr[c4iw_mmid(mr->lkey)] = NULL;
	pthread_spin_unlock(&dev->lock);

	free(to_c4iw_mr(mr));

	return 0;
}

struct ibv_cq *c4iw_create_cq(struct ibv_context *context, int cqe,
			      struct ibv_comp_channel *channel, int comp_vector)
{
	struct ibv_create_cq cmd;
	struct c4iw_create_cq_resp resp;
	struct c4iw_cq *chp;
	struct c4iw_dev *dev = to_c4iw_dev(context->device);
	int ret;

	chp = calloc(1, sizeof *chp);
	if (!chp) {
		return NULL;
	}

	resp.reserved = 0;
	ret = ibv_cmd_create_cq(context, cqe, channel, comp_vector,
				&chp->ibv_cq, &cmd, sizeof cmd,
				&resp.ibv_resp, sizeof resp);
	if (ret)
		goto err1;

	if (resp.reserved)
		PDBG("%s c4iw_create_cq_resp reserved field modified by kernel\n",
		     __func__);

	pthread_spin_init(&chp->lock, PTHREAD_PROCESS_PRIVATE);
#ifdef STALL_DETECTION
	gettimeofday(&chp->time, NULL);
#endif
	chp->rhp = dev;
	chp->cq.qid_mask = resp.qid_mask;
	chp->cq.cqid = resp.cqid;
	chp->cq.size = resp.size;
	chp->cq.memsize = resp.memsize;
	chp->cq.gen = 1;
	chp->cq.queue = mmap(NULL, chp->cq.memsize, PROT_READ|PROT_WRITE,
			     MAP_SHARED, context->cmd_fd, resp.key);
	if (chp->cq.queue == MAP_FAILED)
		goto err2;

	chp->cq.ugts = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
			   context->cmd_fd, resp.gts_key);
	if (chp->cq.ugts == MAP_FAILED)
		goto err3;

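	/*
	 * Advance past the start of the mapped page to the CQ GTS register;
	 * its offset within the page differs between T4 and later adapters.
	 */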
	if (dev_is_t4(chp->rhp))
		chp->cq.ugts += 1;
	else
		chp->cq.ugts += 5;
	chp->cq.sw_queue = calloc(chp->cq.size, sizeof *chp->cq.queue);
	if (!chp->cq.sw_queue)
		goto err4;

	PDBG("%s cqid 0x%x key %" PRIx64 " va %p memsize %lu gts_key %"
	       PRIx64 " va %p qid_mask 0x%x\n",
	       __func__, chp->cq.cqid, resp.key, chp->cq.queue,
	       chp->cq.memsize, resp.gts_key, chp->cq.ugts, chp->cq.qid_mask);

	pthread_spin_lock(&dev->lock);
	dev->cqid2ptr[chp->cq.cqid] = chp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(cq);
	return &chp->ibv_cq;
err4:
	munmap(MASKED(chp->cq.ugts), c4iw_page_size);
err3:
	munmap(chp->cq.queue, chp->cq.memsize);
err2:
	(void)ibv_cmd_destroy_cq(&chp->ibv_cq);
err1:
	free(chp);
	return NULL;
}

int c4iw_resize_cq(struct ibv_cq *ibcq, int cqe)
{
#if 0
	int ret;

	struct ibv_resize_cq cmd;
	struct ibv_resize_cq_resp resp;
	ret = ibv_cmd_resize_cq(ibcq, cqe, &cmd, sizeof cmd, &resp, sizeof resp);
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
#else
	return -ENOSYS;
#endif
}

int c4iw_destroy_cq(struct ibv_cq *ibcq)
{
	int ret;
	struct c4iw_cq *chp = to_c4iw_cq(ibcq);
	struct c4iw_dev *dev = to_c4iw_dev(ibcq->context->device);

	chp->cq.error = 1;
	ret = ibv_cmd_destroy_cq(ibcq);
	if (ret) {
		return ret;
	}
	munmap(MASKED(chp->cq.ugts), c4iw_page_size);
	munmap(chp->cq.queue, chp->cq.memsize);

	pthread_spin_lock(&dev->lock);
	dev->cqid2ptr[chp->cq.cqid] = NULL;
	pthread_spin_unlock(&dev->lock);

	free(chp->cq.sw_queue);
	free(chp);
	return 0;
}

struct ibv_srq *c4iw_create_srq(struct ibv_pd *pd,
				struct ibv_srq_init_attr *attr)
{
	return NULL;
}

int c4iw_modify_srq(struct ibv_srq *srq, struct ibv_srq_attr *attr,
		    int attr_mask)
{
	return ENOSYS;
}

int c4iw_destroy_srq(struct ibv_srq *srq)
{
	return ENOSYS;
}

int c4iw_post_srq_recv(struct ibv_srq *ibsrq, struct ibv_recv_wr *wr,
		       struct ibv_recv_wr **bad_wr)
{
	return ENOSYS;
}

static struct ibv_qp *create_qp_v0(struct ibv_pd *pd,
				   struct ibv_qp_init_attr *attr)
{
	struct ibv_create_qp cmd;
	struct c4iw_create_qp_resp_v0 resp;
	struct c4iw_qp *qhp;
	struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);
	int ret;
	void *dbva;

	PDBG("%s enter qp\n", __func__);
	qhp = calloc(1, sizeof *qhp);
	if (!qhp)
		goto err1;

	ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd,
				sizeof cmd, &resp.ibv_resp, sizeof resp);
	if (ret)
		goto err2;

	PDBG("%s sqid 0x%x sq key %" PRIx64 " sq db/gts key %" PRIx64
	       " rqid 0x%x rq key %" PRIx64 " rq db/gts key %" PRIx64
	       " qid_mask 0x%x\n",
		__func__,
		resp.sqid, resp.sq_key, resp.sq_db_gts_key,
		resp.rqid, resp.rq_key, resp.rq_db_gts_key, resp.qid_mask);

	qhp->wq.qid_mask = resp.qid_mask;
	qhp->rhp = dev;
	qhp->wq.sq.qid = resp.sqid;
	qhp->wq.sq.size = resp.sq_size;
	qhp->wq.sq.memsize = resp.sq_memsize;
	qhp->wq.sq.flags = 0;
	qhp->wq.rq.msn = 1;
	qhp->wq.rq.qid = resp.rqid;
	qhp->wq.rq.size = resp.rq_size;
	qhp->wq.rq.memsize = resp.rq_memsize;
	pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);

	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.sq_db_gts_key);
	if (dbva == MAP_FAILED)
		goto err3;

	qhp->wq.sq.udb = dbva;
	qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize,
			    PROT_WRITE, MAP_SHARED,
			    pd->context->cmd_fd, resp.sq_key);
	if (qhp->wq.sq.queue == MAP_FAILED)
		goto err4;

	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.rq_db_gts_key);
	if (dbva == MAP_FAILED)
		goto err5;
	qhp->wq.rq.udb = dbva;
	qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize,
			    PROT_WRITE, MAP_SHARED,
			    pd->context->cmd_fd, resp.rq_key);
	if (qhp->wq.rq.queue == MAP_FAILED)
		goto err6;

	qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe));
	if (!qhp->wq.sq.sw_sq)
		goto err7;

	qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t));
	if (!qhp->wq.rq.sw_rq)
		goto err8;

	PDBG("%s sq dbva %p sq qva %p sq depth %u sq memsize %lu "
	       " rq dbva %p rq qva %p rq depth %u rq memsize %lu\n",
	     __func__,
	     qhp->wq.sq.udb, qhp->wq.sq.queue,
	     qhp->wq.sq.size, qhp->wq.sq.memsize,
	     qhp->wq.rq.udb, qhp->wq.rq.queue,
	     qhp->wq.rq.size, qhp->wq.rq.memsize);

	qhp->sq_sig_all = attr->sq_sig_all;

	pthread_spin_lock(&dev->lock);
	dev->qpid2ptr[qhp->wq.sq.qid] = qhp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(qp);
	return &qhp->ibv_qp;
err8:
	free(qhp->wq.sq.sw_sq);
err7:
	munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize);
err6:
	munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
err5:
	munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize);
err4:
	munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
err3:
	(void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
err2:
	free(qhp);
err1:
	return NULL;
}

static struct ibv_qp *create_qp(struct ibv_pd *pd,
				struct ibv_qp_init_attr *attr)
{
	struct ibv_create_qp cmd;
	struct c4iw_create_qp_resp resp;
	struct c4iw_qp *qhp;
	struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);
	struct c4iw_context *ctx = to_c4iw_context(pd->context);
	int ret;
	void *dbva;

	PDBG("%s enter qp\n", __func__);
	qhp = calloc(1, sizeof *qhp);
	if (!qhp)
		goto err1;

	ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd,
				sizeof cmd, &resp.ibv_resp, sizeof resp);
	if (ret)
		goto err2;

	PDBG("%s sqid 0x%x sq key %" PRIx64 " sq db/gts key %" PRIx64
	       " rqid 0x%x rq key %" PRIx64 " rq db/gts key %" PRIx64
	       " qid_mask 0x%x\n",
		__func__,
		resp.sqid, resp.sq_key, resp.sq_db_gts_key,
		resp.rqid, resp.rq_key, resp.rq_db_gts_key, resp.qid_mask);

	qhp->wq.qid_mask = resp.qid_mask;
	qhp->rhp = dev;
	qhp->wq.sq.qid = resp.sqid;
	qhp->wq.sq.size = resp.sq_size;
	qhp->wq.sq.memsize = resp.sq_memsize;
	qhp->wq.sq.flags = resp.flags & C4IW_QPF_ONCHIP ? T4_SQ_ONCHIP : 0;
	qhp->wq.sq.flush_cidx = -1;
	qhp->wq.rq.msn = 1;
	qhp->wq.rq.qid = resp.rqid;
	qhp->wq.rq.size = resp.rq_size;
	qhp->wq.rq.memsize = resp.rq_memsize;
	if (ma_wr && resp.sq_memsize < (resp.sq_size + 1) *
	    sizeof *qhp->wq.sq.queue + 16 * sizeof(__be64)) {
		ma_wr = 0;
		fprintf(stderr, "libcxgb4 warning - downlevel iw_cxgb4 driver. "
			"MA workaround disabled.\n");
	}
	pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);

	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.sq_db_gts_key);
	if (dbva == MAP_FAILED) {
		PDBG(" %s mmap for sq db failed\n", __func__);
		goto err3;
	}
	qhp->wq.sq.udb = dbva;
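	/*
	 * On T5 and later chips each queue has a 128-byte doorbell segment in
	 * the BAR2 region.  If this queue's segment lies within the mapped
	 * page, point udb at it directly and allow write-combined writes;
	 * otherwise the qid must be written along with the doorbell.
	 */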
	if (!dev_is_t4(qhp->rhp)) {
		unsigned long segment_offset = 128 * (qhp->wq.sq.qid & qhp->wq.qid_mask);

		if (segment_offset < c4iw_page_size) {
			qhp->wq.sq.udb += segment_offset / 4;
			qhp->wq.sq.wc_reg_available = 1;
		} else
			qhp->wq.sq.bar2_qid = qhp->wq.sq.qid & qhp->wq.qid_mask;
		qhp->wq.sq.udb += 2;
	}

	qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize,
			    PROT_READ | PROT_WRITE, MAP_SHARED,
			    pd->context->cmd_fd, resp.sq_key);
	if (qhp->wq.sq.queue == MAP_FAILED) {
		PDBG(" %s mmap for sq q failed, qhp->wq.sq.memsize is %zu\n",
		     __func__, qhp->wq.sq.memsize);
		goto err4;
	}

	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.rq_db_gts_key);
	if (dbva == MAP_FAILED)
		goto err5;
	qhp->wq.rq.udb = dbva;
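	/* Apply the same BAR2 doorbell-segment adjustment for the RQ. */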
	if (!dev_is_t4(qhp->rhp)) {
		unsigned long segment_offset = 128 * (qhp->wq.rq.qid & qhp->wq.qid_mask);

		if (segment_offset < c4iw_page_size) {
			qhp->wq.rq.udb += segment_offset / 4;
			qhp->wq.rq.wc_reg_available = 1;
		} else
			qhp->wq.rq.bar2_qid = qhp->wq.rq.qid & qhp->wq.qid_mask;
		qhp->wq.rq.udb += 2;
	}
	qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize,
			    PROT_READ | PROT_WRITE, MAP_SHARED,
			    pd->context->cmd_fd, resp.rq_key);
	if (qhp->wq.rq.queue == MAP_FAILED)
		goto err6;

	qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe));
	if (!qhp->wq.sq.sw_sq)
		goto err7;

	qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t));
	if (!qhp->wq.rq.sw_rq)
		goto err8;

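	/*
	 * For an on-chip SQ, also map the PCIE MA sync register; the doorbell
	 * code uses it to flush writes to the on-chip queue memory.
	 */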
	if (t4_sq_onchip(&qhp->wq)) {
		qhp->wq.sq.ma_sync = mmap(NULL, c4iw_page_size, PROT_WRITE,
					  MAP_SHARED, pd->context->cmd_fd,
					  resp.ma_sync_key);
		if (qhp->wq.sq.ma_sync == MAP_FAILED)
			goto err9;
		qhp->wq.sq.ma_sync += (A_PCIE_MA_SYNC & (c4iw_page_size - 1));
	}

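	/*
	 * db_offp points at the kernel's "doorbells off" flag: in the shared
	 * status page when the kernel provides one, otherwise in the status
	 * area at the end of the RQ.
	 */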
	if (ctx->status_page_size) {
		qhp->wq.db_offp = &ctx->status_page->db_off;
	} else {
		qhp->wq.db_offp =
			&qhp->wq.rq.queue[qhp->wq.rq.size].status.db_off;
	}

	PDBG("%s sq dbva %p sq qva %p sq depth %u sq memsize %lu "
	       " rq dbva %p rq qva %p rq depth %u rq memsize %lu\n",
	     __func__,
	     qhp->wq.sq.udb, qhp->wq.sq.queue,
	     qhp->wq.sq.size, qhp->wq.sq.memsize,
	     qhp->wq.rq.udb, qhp->wq.rq.queue,
	     qhp->wq.rq.size, qhp->wq.rq.memsize);

	qhp->sq_sig_all = attr->sq_sig_all;

	pthread_spin_lock(&dev->lock);
	dev->qpid2ptr[qhp->wq.sq.qid] = qhp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(qp);
	return &qhp->ibv_qp;
err9:
	free(qhp->wq.rq.sw_rq);
err8:
	free(qhp->wq.sq.sw_sq);
err7:
	munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize);
err6:
	munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
err5:
	munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize);
err4:
	munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
err3:
	(void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
err2:
	free(qhp);
err1:
	return NULL;
}

struct ibv_qp *c4iw_create_qp(struct ibv_pd *pd,
				     struct ibv_qp_init_attr *attr)
{
	struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);

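	/*
	 * ABI version 0 kernels return the original, smaller create_qp
	 * response, so the QP is set up without the newer on-chip SQ and
	 * status-page support.
	 */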
	if (dev->abi_version == 0)
		return create_qp_v0(pd, attr);
	return create_qp(pd, attr);
}

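/*
 * Rewind the software queue state and zero the hardware queue memory; called
 * when a QP is moved back to the RESET state.
 */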
static void reset_qp(struct c4iw_qp *qhp)
{
	PDBG("%s enter qp %p\n", __func__, qhp);
	qhp->wq.sq.cidx = 0;
	qhp->wq.sq.wq_pidx = qhp->wq.sq.pidx = qhp->wq.sq.in_use = 0;
	qhp->wq.rq.cidx = qhp->wq.rq.pidx = qhp->wq.rq.in_use = 0;
	qhp->wq.sq.oldest_read = NULL;
	memset(qhp->wq.sq.queue, 0, qhp->wq.sq.memsize);
	memset(qhp->wq.rq.queue, 0, qhp->wq.rq.memsize);
}

int c4iw_modify_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
		   int attr_mask)
{
	struct ibv_modify_qp cmd;
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	int ret;

	PDBG("%s enter qp %p new state %d\n", __func__, ibqp,
	     attr_mask & IBV_QP_STATE ? attr->qp_state : -1);
	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq))
		c4iw_flush_qp(qhp);
	ret = ibv_cmd_modify_qp(ibqp, attr, attr_mask, &cmd, sizeof cmd);
	if (!ret && (attr_mask & IBV_QP_STATE) && attr->qp_state == IBV_QPS_RESET)
		reset_qp(qhp);
	pthread_spin_unlock(&qhp->lock);
	return ret;
}

int c4iw_destroy_qp(struct ibv_qp *ibqp)
{
	int ret;
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	struct c4iw_dev *dev = to_c4iw_dev(ibqp->context->device);

	PDBG("%s enter qp %p\n", __func__, ibqp);
	pthread_spin_lock(&qhp->lock);
	c4iw_flush_qp(qhp);
	pthread_spin_unlock(&qhp->lock);

	ret = ibv_cmd_destroy_qp(ibqp);
	if (ret) {
		return ret;
	}
	if (t4_sq_onchip(&qhp->wq)) {
		qhp->wq.sq.ma_sync -= (A_PCIE_MA_SYNC & (c4iw_page_size - 1));
		munmap((void *)qhp->wq.sq.ma_sync, c4iw_page_size);
	}
	munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
	munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
	munmap(qhp->wq.sq.queue, qhp->wq.sq.memsize);
	munmap(qhp->wq.rq.queue, qhp->wq.rq.memsize);

	pthread_spin_lock(&dev->lock);
	dev->qpid2ptr[qhp->wq.sq.qid] = NULL;
	pthread_spin_unlock(&dev->lock);

	free(qhp->wq.rq.sw_rq);
	free(qhp->wq.sq.sw_sq);
	free(qhp);
	return 0;
}

int c4iw_query_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
		  int attr_mask, struct ibv_qp_init_attr *init_attr)
{
	struct ibv_query_qp cmd;
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	int ret;

	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq))
		c4iw_flush_qp(qhp);
	ret = ibv_cmd_query_qp(ibqp, attr, attr_mask, init_attr, &cmd, sizeof cmd);
	pthread_spin_unlock(&qhp->lock);
	return ret;
}

struct ibv_ah *c4iw_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
{
	return NULL;
}

int c4iw_destroy_ah(struct ibv_ah *ah)
{
	return ENOSYS;
}

int c4iw_attach_mcast(struct ibv_qp *ibqp, const union ibv_gid *gid,
		      uint16_t lid)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	int ret;

	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq))
		c4iw_flush_qp(qhp);
	ret = ibv_cmd_attach_mcast(ibqp, gid, lid);
	pthread_spin_unlock(&qhp->lock);
	return ret;
}

int c4iw_detach_mcast(struct ibv_qp *ibqp, const union ibv_gid *gid,
		      uint16_t lid)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	int ret;

	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq))
		c4iw_flush_qp(qhp);
	ret = ibv_cmd_detach_mcast(ibqp, gid, lid);
	pthread_spin_unlock(&qhp->lock);
	return ret;
}

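/*
 * On fatal QP async events, flush the QP so outstanding work requests are
 * completed through the CQs rather than left hanging.
 */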
void c4iw_async_event(struct ibv_async_event *event)
{
	PDBG("%s type %d obj %p\n", __func__, event->event_type,
	     event->element.cq);

	switch (event->event_type) {
	case IBV_EVENT_CQ_ERR:
		break;
	case IBV_EVENT_QP_FATAL:
	case IBV_EVENT_QP_REQ_ERR:
	case IBV_EVENT_QP_ACCESS_ERR:
	case IBV_EVENT_PATH_MIG_ERR: {
		struct c4iw_qp *qhp = to_c4iw_qp(event->element.qp);
		pthread_spin_lock(&qhp->lock);
		c4iw_flush_qp(qhp);
		pthread_spin_unlock(&qhp->lock);
		break;
	}
	case IBV_EVENT_SQ_DRAINED:
	case IBV_EVENT_PATH_MIG:
	case IBV_EVENT_COMM_EST:
	case IBV_EVENT_QP_LAST_WQE_REACHED:
	default:
		break;
	}
}