iw_cxgb_provider.c revision 255932
/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.c 255932 2013-09-29 00:35:03Z alfred $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pciio.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <netinet/in.h>


#include <vm/vm.h>
#include <vm/pmap.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <linux/idr.h>
#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>


#include <cxgb_include.h>
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
#include <ulp/iw_cxgb/iw_cxgb.h>
#include <ulp/iw_cxgb/iw_cxgb_resource.h>
#include <ulp/iw_cxgb/iw_cxgb_user.h>

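/*
 * The following verbs (port modify, address handles, multicast and MAD
 * processing) are not implemented by this driver; they simply fail with
 * -ENOSYS if the core ever calls them.
 */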
static int
iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return (-ENOSYS);
}

static struct ib_ah *
iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int
iwch_ah_destroy(struct ib_ah *ah)
{
	return (-ENOSYS);
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return (-ENOSYS);
}

static int
iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return (-ENOSYS);
}

static int
iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return (-ENOSYS);
}

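/*
 * User contexts track the mmap cookies handed out for queue memory; on
 * teardown every outstanding entry is unlinked and freed before the
 * low-level ucontext is released.
 */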
static int
iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	CTR2(KTR_IW_CXGB, "%s context %p", __FUNCTION__, context);
	TAILQ_FOREACH_SAFE(mm, &ucontext->mmaps, entry, tmp) {
		TAILQ_REMOVE(&ucontext->mmaps, mm, entry);
		cxfree(mm);
	}
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	cxfree(ucontext);
	return 0;
}

static struct ib_ucontext *
iwch_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	context = malloc(sizeof(*context), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	TAILQ_INIT(&context->mmaps);
	mtx_init(&context->mmap_lock, "ucontext mmap", NULL, MTX_DEF);
	return &context->ibucontext;
}

static int
iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	CTR2(KTR_IW_CXGB, "%s ib_cq %p", __FUNCTION__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	mtx_lock(&chp->lock);
	if (--chp->refcnt)
		msleep(chp, &chp->lock, 0, "iwch_destroy_cq", 0);
	mtx_unlock(&chp->lock);

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	cxfree(chp);
	return 0;
}

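/*
 * CQ creation.  The requested depth is rounded up to a power of two and
 * stored as a size_log2 value (a request for 100 entries, for example,
 * becomes 128, size_log2 == 7).  On T3A parts 16 extra slots are added for
 * error CQEs, and overflow still cannot be ruled out.  For user CQs the
 * queue memory is exported through an mmap cookie (uresp.key) intended to
 * let user space map the queue; a downlevel libcxgb3 that passes a smaller
 * output buffer gets the shorter iwch_create_cq_resp_v0 layout instead.
 */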
static struct ib_cq *
iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	CTR3(KTR_IW_CXGB, "%s ib_dev %p entries %d", __FUNCTION__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = malloc(sizeof(*chp), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!chp) {
		return ERR_PTR(-ENOMEM);
	}
	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				cxfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 /*__user */*)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		cxfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	mtx_init(&chp->lock, "cxgb cq", NULL, MTX_DEF|MTX_DUPOK);
	chp->refcnt = 1;
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		cxfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, M_NOWAIT);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		mtx_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		mtx_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = vtophys(chp->cq.queue);
		if (udata->outlen < sizeof uresp) {
			if (!warned++)
				CTR1(KTR_IW_CXGB, "%s Warning - "
					"downlevel libcxgb3 (non-fatal).\n",
					__func__);
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			resplen = sizeof uresp;
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			cxfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	CTR4(KTR_IW_CXGB, "created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}

static int
iwch_resize_cq(struct ib_cq *cq __unused, int cqe __unused,
    struct ib_udata *udata __unused)
{

	return (-ENOSYS);
}

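/*
 * Re-arm a CQ for notification.  IB_CQ_SOLICITED maps to the T3
 * "solicited event" arm, anything else to "any notification".  For user
 * CQs the current read pointer is first copied in from the shared
 * user-space location so the hardware is armed at the right index.
 */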
static int
iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (copyin(&rptr, chp->user_rptr_addr, 4))
			return (-EFAULT);
		mtx_lock(&chp->lock);
		chp->cq.rptr = rptr;
	} else
		mtx_lock(&chp->lock);
	CTR2(KTR_IW_CXGB, "%s rptr 0x%x", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	mtx_unlock(&chp->lock);
	if (err < 0)
		log(LOG_ERR, "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

static int
iwch_mmap(struct ib_ucontext *context __unused, struct vm_area_struct *vma __unused)
{

	return (-ENOSYS);
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	CTR3(KTR_IW_CXGB, "%s ibpd %p pdid 0x%x", __FUNCTION__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	cxfree(php);
	return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = malloc(sizeof(*php), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	CTR3(KTR_IW_CXGB, "%s pdid 0x%0x ptr 0x%p", __FUNCTION__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGB, "%s ib_mr %p", __FUNCTION__, ib_mr);
	/* There can be no memory windows */
	if (atomic_load_acq_int(&ib_mr->usecnt.counter))
		return (-EINVAL);

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		cxfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGB, "%s mmid 0x%x ptr %p", __FUNCTION__, mmid, mhp);
	cxfree(mhp);
	return 0;
}

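/*
 * Register a set of physical buffers as an MR.  The buffers are flattened
 * into a page list, copied into a newly allocated PBL, and the resulting
 * STag/TPT entry is written via iwch_register_mem().  The start address
 * and the first buffer must share the same page offset, and with multiple
 * buffers the first one must end on a page boundary.
 */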
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					int acc,
					u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		cxfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	cxfree(page_list);
	if (ret)
		goto err;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	cxfree(mhp);
	return ERR_PTR(ret);

}

static int iwch_reregister_phys_mem(struct ib_mr *mr,
				     int mr_rereg_mask,
				     struct ib_pd *pd,
	                             struct ib_phys_buf *buffer_list,
	                             int num_phys_buf,
	                             int acc, u64 * iova_start)
{

	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages = 0;
	int ret;

	CTR3(KTR_IW_CXGB, "%s ib_mr %p ib_pd %p", __FUNCTION__, mr, pd);

	/* There can be no memory windows */
	if (atomic_load_acq_int(&mr->usecnt.counter))
		return (-EINVAL);

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return (-EINVAL);

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
	cxfree(page_list);
	if (ret) {
		return ret;
	}
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}


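/*
 * Register a user memory region.  The user pages are pinned with
 * ib_umem_get(), a PBL large enough for every umem chunk entry is
 * allocated, and the region is then registered under the caller's PD.
 * Note that the loop that walks the chunk page lists and fills the PBL
 * is compiled out ("notyet") in this port.
 */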
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata,
				      int mr_id)
{
	__be64 *pages;
	int shift, i, n;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;
#ifdef notyet
	int j, k, len;
#endif

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		cxfree(mhp);
		return ERR_PTR(-err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) kmalloc(n * sizeof(u64), M_NOWAIT);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

#ifdef notyet
	TAILQ_FOREACH(chunk, &mhp->umem->chunk_list, entry)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = htobe64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = iwch_write_pbl(mhp, pages, i, n);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}
#endif

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);
#ifdef notyet
pbl_done:
#endif
	cxfree(pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
	                         rhp->rdev.rnic_info.pbl_base) >> 3;
		CTR2(KTR_IW_CXGB, "%s user resp pbl_addr 0x%x", __FUNCTION__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			/*
			 * iwch_dereg_mr() has already released the umem and
			 * freed mhp, so return here rather than falling into
			 * the err path, which would free them a second time.
			 */
			return ERR_PTR(-EFAULT);
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	cxfree(mhp);
	return ERR_PTR(-err);
}

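/*
 * A "DMA MR" is implemented as a physical registration of the largest
 * window T3 can describe: 32 bits of length starting at address 0.
 */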
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}

static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		cxfree(mhp);
		return ERR_PTR(-ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		cxfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGB, "%s mmid 0x%x mhp %p stag 0x%x", __FUNCTION__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	cxfree(mhp);
	CTR4(KTR_IW_CXGB, "%s ib_mw %p mmid 0x%x ptr %p", __FUNCTION__, mw, mmid, mhp);
	return 0;
}

static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	mtx_lock(&qhp->lock);
	if (qhp->ep)
		msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp1", 0);
	mtx_unlock(&qhp->lock);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	mtx_lock(&qhp->lock);
	if (--qhp->refcnt)
		msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp2", 0);
	mtx_unlock(&qhp->lock);

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	CTR4(KTR_IW_CXGB, "%s ib_qp %p qpid 0x%0x qhp %p", __FUNCTION__,
	     ib_qp, qhp->wq.qpid, qhp);
	cxfree(qhp);
	return 0;
}

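/*
 * QP creation.  The RQ depth must be (entries + 1) rounded up to a power
 * of two and at least 16; the SQ and total WQ depths are also rounded up
 * to powers of two because the queue macros assume it.  For example, a
 * request for 64 receive and 32 send WRs yields rqsize 128, sqsize 32 and
 * wqsize 256.  For user QPs two mmap cookies are returned: one for the
 * work queue itself and one for the doorbell page.
 */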
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);
	CTR4(KTR_IW_CXGB, "%s wqsize %d sqsize %d rqsize %d", __FUNCTION__,
	     wqsize, sqsize, rqsize);
	qhp = malloc(sizeof(*qhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		cxfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	mtx_init(&qhp->lock, "cxgb qp", NULL, MTX_DEF|MTX_DUPOK);
	qhp->refcnt = 1;

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		cxfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, M_NOWAIT);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, M_NOWAIT);
		if (!mm2) {
			cxfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		mtx_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		mtx_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			cxfree(mm1);
			cxfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = vtophys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	callout_init(&(qhp->timer), TRUE);
	CTR6(KTR_IW_CXGB, "sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d",
	     qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2);
	return &qhp->ibqp;
}

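/*
 * Translate the generic ib_qp_attr/attr_mask into the iwch attribute set.
 * A transition to RTR is silently dropped (iWARP has no RTR state), and
 * the remote access flags map onto the RDMA read/write/bind enables.
 */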
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;


	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
	mtx_lock(&to_iwch_qp(qp)->lock);
	to_iwch_qp(qp)->refcnt++;
	mtx_unlock(&to_iwch_qp(qp)->lock);
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
	mtx_lock(&to_iwch_qp(qp)->lock);
	if (--to_iwch_qp(qp)->refcnt == 0)
	        wakeup(to_iwch_qp(qp));
	mtx_unlock(&to_iwch_qp(qp)->lock);
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	CTR3(KTR_IW_CXGB, "%s ib_dev %p qpn 0x%x", __FUNCTION__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}


static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 * pkey)
{
	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	*pkey = 0;
	return 0;
}

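/*
 * The GID for a port is simply the port's Ethernet MAC address,
 * zero-padded to 16 bytes.
 */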
static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;
	struct port_info *pi;
	struct adapter *sc;

	CTR5(KTR_IW_CXGB, "%s ibdev %p, port %d, index %d, gid %p",
	       __FUNCTION__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	sc = dev->rdev.adap;
	PANIC_IF(port == 0 || port > 2);
	pi = &sc->port[port - 1];
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), pi->hw_addr, 6);
	return 0;
}

static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct iwch_dev *dev;
	struct adapter *sc;

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);

	dev = to_iwch_dev(ibdev);
	sc = dev->rdev.adap;
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, sc->port[0].hw_addr, 6);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = pci_get_vendor(sc->dev);
	props->vendor_part_id = pci_get_device(sc->dev);
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = IB_MTU_2048;
	props->state = IB_PORT_ACTIVE;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

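/*
 * Register the device with the RDMA core: fill in the ib_device vector
 * with the verbs above, advertise the supported uverbs commands, attach
 * the iWARP CM callbacks and finally call ib_register_device().  On
 * failure the iw_cm_verbs allocation is the only thing to undo.
 */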
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	struct adapter *sc = dev->rdev.adap;

	CTR2(KTR_IW_CXGB, "%s iwch_dev %p", __FUNCTION__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, sc->port[0].hw_addr, 6);
	dev->device_cap_flags =
		(IB_DEVICE_LOCAL_DMA_LKEY |
		 IB_DEVICE_MEM_WINDOW);

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = sc->params.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = dev->rdev.adap->dev;
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;

	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;

	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;

	dev->ibdev.iwcm =
	    kmalloc(sizeof(struct iw_cm_verbs), M_NOWAIT);
	if (!dev->ibdev.iwcm)
		return (ENOMEM);

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	return (0);

bail1:
	cxfree(dev->ibdev.iwcm);
	return (ret);
}

void iwch_unregister_device(struct iwch_dev *dev)
{

	ib_unregister_device(&dev->ibdev);
	cxfree(dev->ibdev.iwcm);
	return;
}
#endif