/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.c 318798 2017-05-24 18:14:57Z np $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pciio.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/proc.h>

#include <netinet/in.h>


#include <vm/vm.h>
#include <vm/pmap.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <linux/idr.h>
#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>


#include <cxgb_include.h>
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
#include <ulp/iw_cxgb/iw_cxgb.h>
#include <ulp/iw_cxgb/iw_cxgb_resource.h>
#include <ulp/iw_cxgb/iw_cxgb_user.h>

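/*
 * Verbs that the T3 iWARP provider does not implement.  Address handles,
 * multicast and MAD processing have no iWARP equivalent, so these entry
 * points simply fail with -ENOSYS.
 */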
static int
iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return (-ENOSYS);
}

static struct ib_ah *
iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int
iwch_ah_destroy(struct ib_ah *ah)
{
	return (-ENOSYS);
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return (-ENOSYS);
}

static int
iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return (-ENOSYS);
}

static int
iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return (-ENOSYS);
}

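/*
 * Tear down a user context: free any mmap entries still queued on the
 * context, then release the associated rdev resources.
 */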
static int
iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	CTR2(KTR_IW_CXGB, "%s context %p", __FUNCTION__, context);
	TAILQ_FOREACH_SAFE(mm, &ucontext->mmaps, entry, tmp) {
		TAILQ_REMOVE(&ucontext->mmaps, mm, entry);
		cxfree(mm);
	}
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	cxfree(ucontext);
	return 0;
}

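/*
 * Allocate a per-process user context and initialize its mmap list and
 * lock; the list tracks queue memory handed out to userspace via mmap.
 */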
static struct ib_ucontext *
iwch_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	context = malloc(sizeof(*context), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	TAILQ_INIT(&context->mmaps);
	mtx_init(&context->mmap_lock, "ucontext mmap", NULL, MTX_DEF);
	return &context->ibucontext;
}

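/*
 * Destroy a CQ: drop it from the cqid idr, wait for the reference count
 * to reach zero, then free the hardware CQ and the softc.
 */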
static int
iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	CTR2(KTR_IW_CXGB, "%s ib_cq %p", __FUNCTION__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	mtx_lock(&chp->lock);
	if (--chp->refcnt)
		msleep(chp, &chp->lock, 0, "iwch_destroy_cq", 0);
	mtx_unlock(&chp->lock);

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	cxfree(chp);
	return 0;
}

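/*
 * Create a CQ.  For user CQs the queue memory is exported to userspace
 * through the context's mmap list; the response size depends on whether
 * the library speaks the v0 or the current ABI (see the outlen check
 * below).
 */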
static struct ib_cq *
iwch_create_cq(struct ib_device *ibdev, struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;
	int entries = attr->cqe;

	CTR3(KTR_IW_CXGB, "%s ib_dev %p entries %d", __FUNCTION__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = malloc(sizeof(*chp), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!chp) {
		return ERR_PTR(-ENOMEM);
	}
	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				cxfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 /*__user */*)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		cxfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	mtx_init(&chp->lock, "cxgb cq", NULL, MTX_DEF|MTX_DUPOK);
	chp->refcnt = 1;
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		cxfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, M_NOWAIT);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		mtx_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		mtx_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = vtophys(chp->cq.queue);
		if (udata->outlen < sizeof uresp) {
			if (!warned++)
				CTR1(KTR_IW_CXGB, "%s Warning - "
					"downlevel libcxgb3 (non-fatal).\n",
					__func__);
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
						sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
						sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			resplen = sizeof uresp;
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			cxfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	CTR4(KTR_IW_CXGB, "created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}

static int
iwch_resize_cq(struct ib_cq *cq __unused, int cqe __unused,
    struct ib_udata *udata __unused)
{

	return (-ENOSYS);
}

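/*
 * Re-arm the CQ for notifications.  For user CQs the current read
 * pointer is copied in from userspace before the CQ operation is issued.
 */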
static int
iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (copyin(chp->user_rptr_addr, &rptr, sizeof(rptr)))
			return (-EFAULT);
		mtx_lock(&chp->lock);
		chp->cq.rptr = rptr;
	} else
		mtx_lock(&chp->lock);
	CTR2(KTR_IW_CXGB, "%s rptr 0x%x", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	mtx_unlock(&chp->lock);
	if (err < 0)
		log(LOG_ERR, "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

static int
iwch_mmap(struct ib_ucontext *context __unused, struct vm_area_struct *vma __unused)
{

	return (-ENOSYS);
}

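/* Protection domains map directly onto hardware PD ids from the rdev. */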
static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	CTR3(KTR_IW_CXGB, "%s ibpd %p pdid 0x%x", __FUNCTION__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	cxfree(php);
	return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = malloc(sizeof(*php), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	CTR3(KTR_IW_CXGB, "%s pdid 0x%0x ptr 0x%p", __FUNCTION__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGB, "%s ib_mr %p", __FUNCTION__, ib_mr);
	/* There can be no memory windows */
	if (atomic_load_acq_int(&ib_mr->usecnt.counter))
		return (-EINVAL);

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		cxfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGB, "%s mmid 0x%x ptr %p", __FUNCTION__, mmid, mhp);
	cxfree(mhp);
	return 0;
}

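/*
 * Register a set of physical buffers: flatten them into a page list,
 * write that list into a PBL, and register the result as a single STag.
 */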
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					int acc,
					u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		cxfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	cxfree(page_list);
	if (ret)
		goto err;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	cxfree(mhp);
	return ERR_PTR(ret);

}

static int iwch_reregister_phys_mem(struct ib_mr *mr,
				     int mr_rereg_mask,
				     struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf,
				     int acc, u64 * iova_start)
{

	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages = 0;
	int ret;

	CTR3(KTR_IW_CXGB, "%s ib_mr %p ib_pd %p", __FUNCTION__, mr, pd);

	/* There can be no memory windows */
	if (atomic_load_acq_int(&mr->usecnt.counter))
		return (-EINVAL);

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return (-EINVAL);

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
	cxfree(page_list);
	if (ret) {
		return ret;
	}
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}


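/*
 * Register a user memory region: pin the pages with ib_umem_get(), walk
 * the resulting scatterlist to build the PBL in PAGE_SIZE chunks, and
 * register the STag.  On non-T3A devices the PBL address is also
 * returned to userspace.
 */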
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata,
				      int mr_id)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;
	struct scatterlist *sg;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		cxfree(mhp);
		return ERR_PTR(-err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) kmalloc(n * sizeof(u64), M_NOWAIT);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
					mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = iwch_write_pbl(mhp, pages, i, n);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}
#if 0
	TAILQ_FOREACH(chunk, &mhp->umem->chunk_list, entry)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = htobe64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = iwch_write_pbl(mhp, pages, i, n);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}
#endif

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);
pbl_done:
	cxfree(pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		CTR2(KTR_IW_CXGB, "%s user resp pbl_addr 0x%x", __FUNCTION__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			err = EFAULT;
			goto err;
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	cxfree(mhp);
	return ERR_PTR(-err);
}

static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}

static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		cxfree(mhp);
		return ERR_PTR(-ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		cxfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGB, "%s mmid 0x%x mhp %p stag 0x%x", __FUNCTION__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	cxfree(mhp);
	CTR4(KTR_IW_CXGB, "%s ib_mw %p mmid 0x%x ptr %p", __FUNCTION__, mw, mmid, mhp);
	return 0;
}

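/*
 * Destroy a QP: move it to the ERROR state, wait for the endpoint and
 * all remaining references to be released, then free the hardware queue.
 */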
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	mtx_lock(&qhp->lock);
	if (qhp->ep)
		msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp1", 0);
	mtx_unlock(&qhp->lock);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	mtx_lock(&qhp->lock);
	if (--qhp->refcnt)
		msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp2", 0);
	mtx_unlock(&qhp->lock);

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	CTR4(KTR_IW_CXGB, "%s ib_qp %p qpid 0x%0x qhp %p", __FUNCTION__,
	     ib_qp, qhp->wq.qpid, qhp);
	cxfree(qhp);
	return 0;
}

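/*
 * Create an RC QP.  The RQ depth is rounded up to a power of two with
 * room for one extra entry (minimum 16), the SQ and total WQ sizes are
 * rounded up as well, and for user QPs the queue and doorbell pages are
 * exported through the context's mmap list.
 */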
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);
	CTR4(KTR_IW_CXGB, "%s wqsize %d sqsize %d rqsize %d", __FUNCTION__,
	     wqsize, sqsize, rqsize);
	qhp = malloc(sizeof(*qhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		cxfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	mtx_init(&qhp->lock, "cxgb qp", NULL, MTX_DEF|MTX_DUPOK);
	qhp->refcnt = 1;

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		cxfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, M_NOWAIT);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, M_NOWAIT);
		if (!mm2) {
			cxfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		mtx_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		mtx_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			cxfree(mm1);
			cxfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = vtophys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	callout_init(&(qhp->timer), 1);
	CTR6(KTR_IW_CXGB, "sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d",
	     qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2);
	return &qhp->ibqp;
}

static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;


	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

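/* QP reference helpers; the connection manager calls these via iwcm ops. */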
void iwch_qp_add_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
	mtx_lock(&to_iwch_qp(qp)->lock);
	to_iwch_qp(qp)->refcnt++;
	mtx_unlock(&to_iwch_qp(qp)->lock);
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
	mtx_lock(&to_iwch_qp(qp)->lock);
	if (--to_iwch_qp(qp)->refcnt == 0)
		wakeup(to_iwch_qp(qp));
	mtx_unlock(&to_iwch_qp(qp)->lock);
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	CTR3(KTR_IW_CXGB, "%s ib_dev %p qpn 0x%x", __FUNCTION__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}


static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 * pkey)
{
	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;
	struct port_info *pi;
	struct adapter *sc;

	CTR5(KTR_IW_CXGB, "%s ibdev %p, port %d, index %d, gid %p",
	       __FUNCTION__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	sc = dev->rdev.adap;
	PANIC_IF(port == 0 || port > 2);
	pi = &sc->port[port - 1];
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), pi->hw_addr, 6);
	return 0;
}

static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct iwch_dev *dev;
	struct adapter *sc;

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);

	dev = to_iwch_dev(ibdev);
	sc = dev->rdev.adap;
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, sc->port[0].hw_addr, 6);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = pci_get_vendor(sc->dev);
	props->vendor_part_id = pci_get_device(sc->dev);
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = IB_MTU_2048;
	props->state = IB_PORT_ACTIVE;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

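/*
 * Fill in the ib_device methods and iwcm callbacks for this adapter and
 * register it with the RDMA core.
 */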
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	struct adapter *sc = dev->rdev.adap;

	CTR2(KTR_IW_CXGB, "%s iwch_dev %p", __FUNCTION__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, sc->port[0].hw_addr, 6);
	dev->device_cap_flags =
		(IB_DEVICE_LOCAL_DMA_LKEY |
		 IB_DEVICE_MEM_WINDOW);

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = sc->params.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = NULL;
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;

	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;

	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;

	dev->ibdev.iwcm =
	    kmalloc(sizeof(struct iw_cm_verbs), M_NOWAIT);
	if (!dev->ibdev.iwcm)
		return (ENOMEM);

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen_ep = iwch_create_listen_ep;
	dev->ibdev.iwcm->destroy_listen_ep = iwch_destroy_listen_ep;
	dev->ibdev.iwcm->newconn = process_newconn;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	return (0);

bail1:
	cxfree(dev->ibdev.iwcm);
	return (ret);
}

void iwch_unregister_device(struct iwch_dev *dev)
{

	ib_unregister_device(&dev->ibdev);
	cxfree(dev->ibdev.iwcm);
	return;
}
#endif