iw_cxgb_provider.c revision 237263
/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.c 237263 2012-06-19 07:34:13Z np $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pciio.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/proc.h>

#include <netinet/in.h>


#include <vm/vm.h>
#include <vm/pmap.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <linux/idr.h>
#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>


#include <cxgb_include.h>
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
#include <ulp/iw_cxgb/iw_cxgb.h>
#include <ulp/iw_cxgb/iw_cxgb_resource.h>
#include <ulp/iw_cxgb/iw_cxgb_user.h>

static int
iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return (-ENOSYS);
}

static struct ib_ah *
iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int
iwch_ah_destroy(struct ib_ah *ah)
{
	return (-ENOSYS);
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return (-ENOSYS);
}

static int
iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return (-ENOSYS);
}

static int
iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return (-ENOSYS);
}

static int
iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	CTR2(KTR_IW_CXGB, "%s context %p", __FUNCTION__, context);
	TAILQ_FOREACH_SAFE(mm, &ucontext->mmaps, entry, tmp) {
		TAILQ_REMOVE(&ucontext->mmaps, mm, entry);
		cxfree(mm);
	}
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	cxfree(ucontext);
	return 0;
}

static struct ib_ucontext *
iwch_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	context = malloc(sizeof(*context), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	TAILQ_INIT(&context->mmaps);
	mtx_init(&context->mmap_lock, "ucontext mmap", NULL, MTX_DEF);
	return &context->ibucontext;
}

static int
iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	CTR2(KTR_IW_CXGB, "%s ib_cq %p", __FUNCTION__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	mtx_lock(&chp->lock);
	if (--chp->refcnt)
		msleep(chp, &chp->lock, 0, "iwch_destroy_cq", 0);
	mtx_unlock(&chp->lock);

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	cxfree(chp);
	return 0;
}

static struct ib_cq *
iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	CTR3(KTR_IW_CXGB, "%s ib_dev %p entries %d", __FUNCTION__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = malloc(sizeof(*chp), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!chp) {
		return ERR_PTR(-ENOMEM);
	}
	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				cxfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 /*__user */*)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		cxfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	mtx_init(&chp->lock, "cxgb cq", NULL, MTX_DEF|MTX_DUPOK);
	chp->refcnt = 1;
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		cxfree(chp);
		return ERR_PTR(-ENOMEM);
	}

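	/*
	 * For user-mode CQs, export the CQ ring to the process: hand back an
	 * mmap key in the create response and record the key, physical
	 * address, and length via insert_mmap() so the mapping can be
	 * resolved later.  A downlevel libcxgb3 only accepts the old (v0)
	 * response layout, so trim the reply to what the caller's output
	 * buffer can hold.
	 */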
	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, M_NOWAIT);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		mtx_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		mtx_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = vtophys(chp->cq.queue);
		if (udata->outlen < sizeof uresp) {
			if (!warned++)
				CTR1(KTR_IW_CXGB, "%s Warning - "
					"downlevel libcxgb3 (non-fatal).\n",
					__func__);
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			resplen = sizeof uresp;
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			cxfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	CTR4(KTR_IW_CXGB, "created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}

static int
iwch_resize_cq(struct ib_cq *cq __unused, int cqe __unused,
    struct ib_udata *udata __unused)
{

	return (-ENOSYS);
}

static int
iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (copyin(&rptr, chp->user_rptr_addr, 4))
			return (-EFAULT);
		mtx_lock(&chp->lock);
		chp->cq.rptr = rptr;
	} else
		mtx_lock(&chp->lock);
	CTR2(KTR_IW_CXGB, "%s rptr 0x%x", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	mtx_unlock(&chp->lock);
	if (err < 0)
		log(LOG_ERR, "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

static int
iwch_mmap(struct ib_ucontext *context __unused, struct vm_area_struct *vma __unused)
{

	return (-ENOSYS);
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	CTR3(KTR_IW_CXGB, "%s ibpd %p pdid 0x%x", __FUNCTION__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	cxfree(php);
	return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = malloc(sizeof(*php), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	CTR3(KTR_IW_CXGB, "%s pdid 0x%0x ptr 0x%p", __FUNCTION__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGB, "%s ib_mr %p", __FUNCTION__, ib_mr);
	/* There can be no memory windows */
	if (atomic_load_acq_int(&ib_mr->usecnt.counter))
		return (-EINVAL);

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		cxfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGB, "%s mmid 0x%x ptr %p", __FUNCTION__, mmid, mhp);
	cxfree(mhp);
	return 0;
}

static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					int acc,
					u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		cxfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	cxfree(page_list);
	if (ret)
		goto err;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	cxfree(mhp);
	return ERR_PTR(ret);

}

static int iwch_reregister_phys_mem(struct ib_mr *mr,
				     int mr_rereg_mask,
				     struct ib_pd *pd,
	                             struct ib_phys_buf *buffer_list,
	                             int num_phys_buf,
	                             int acc, u64 * iova_start)
{

	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	CTR3(KTR_IW_CXGB, "%s ib_mr %p ib_pd %p", __FUNCTION__, mr, pd);

	/* There can be no memory windows */
	if (atomic_load_acq_int(&mr->usecnt.counter))
		return (-EINVAL);

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return (-EINVAL);

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
	cxfree(page_list);
	if (ret) {
		return ret;
	}
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}


static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, i, n;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;
#ifdef notyet
	int j, k, len;
#endif

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		cxfree(mhp);
		return ERR_PTR(-err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) kmalloc(n * sizeof(u64), M_NOWAIT);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

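	/*
	 * The umem chunk walk below, which converts the pinned user pages
	 * into big-endian DMA addresses and flushes them to the adapter's
	 * PBL in PAGE_SIZE-sized batches, is under "#ifdef notyet" in this
	 * port, so i stays 0 and the PBL allocated above is not populated
	 * here.
	 */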
#ifdef notyet
	TAILQ_FOREACH(chunk, &mhp->umem->chunk_list, entry)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = htobe64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = iwch_write_pbl(mhp, pages, i, n);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}
#endif

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);
#ifdef notyet
pbl_done:
#endif
	cxfree(pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
	                         rhp->rdev.rnic_info.pbl_base) >> 3;
		CTR2(KTR_IW_CXGB, "%s user resp pbl_addr 0x%x", __FUNCTION__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			err = EFAULT;
			goto err;
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	cxfree(mhp);
	return ERR_PTR(-err);
}

static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}

static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		cxfree(mhp);
		return ERR_PTR(-ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		cxfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGB, "%s mmid 0x%x mhp %p stag 0x%x", __FUNCTION__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	cxfree(mhp);
	CTR4(KTR_IW_CXGB, "%s ib_mw %p mmid 0x%x ptr %p", __FUNCTION__, mw, mmid, mhp);
	return 0;
}

static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	mtx_lock(&qhp->lock);
	if (qhp->ep)
		msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp1", 0);
	mtx_unlock(&qhp->lock);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	mtx_lock(&qhp->lock);
	if (--qhp->refcnt)
		msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp2", 0);
	mtx_unlock(&qhp->lock);

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	CTR4(KTR_IW_CXGB, "%s ib_qp %p qpid 0x%0x qhp %p", __FUNCTION__,
	     ib_qp, qhp->wq.qpid, qhp);
	cxfree(qhp);
	return 0;
}

static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);
	CTR4(KTR_IW_CXGB, "%s wqsize %d sqsize %d rqsize %d", __FUNCTION__,
	     wqsize, sqsize, rqsize);
	qhp = malloc(sizeof(*qhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		cxfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	mtx_init(&qhp->lock, "cxgb qp", NULL, MTX_DEF|MTX_DUPOK);
	qhp->refcnt = 1;

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		cxfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

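	/*
	 * For user QPs, export two mappings to the process: the work queue
	 * memory itself and the QP's user doorbell page.  Each gets its own
	 * PAGE_SIZE mmap key, returned in the create response and recorded
	 * in the ucontext's mmap list.
	 */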
	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, M_NOWAIT);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, M_NOWAIT);
		if (!mm2) {
			cxfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		mtx_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		mtx_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			cxfree(mm1);
			cxfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = vtophys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	callout_init(&(qhp->timer), TRUE);
	CTR6(KTR_IW_CXGB, "sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d",
	     qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2);
	return &qhp->ibqp;
}

static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;


	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
	mtx_lock(&to_iwch_qp(qp)->lock);
	to_iwch_qp(qp)->refcnt++;
	mtx_unlock(&to_iwch_qp(qp)->lock);
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
	mtx_lock(&to_iwch_qp(qp)->lock);
	if (--to_iwch_qp(qp)->refcnt == 0)
	        wakeup(to_iwch_qp(qp));
	mtx_unlock(&to_iwch_qp(qp)->lock);
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	CTR3(KTR_IW_CXGB, "%s ib_dev %p qpn 0x%x", __FUNCTION__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}


static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 * pkey)
{
	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;
	struct port_info *pi;
	struct adapter *sc;

	CTR5(KTR_IW_CXGB, "%s ibdev %p, port %d, index %d, gid %p",
	       __FUNCTION__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	sc = dev->rdev.adap;
	PANIC_IF(port == 0 || port > 2);
	pi = &sc->port[port - 1];
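	/* The GID is synthesized from the port's 6-byte Ethernet MAC address. */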
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), pi->hw_addr, 6);
	return 0;
}

static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct iwch_dev *dev;
	struct adapter *sc;

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);

	dev = to_iwch_dev(ibdev);
	sc = dev->rdev.adap;
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, sc->port[0].hw_addr, 6);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = pci_get_vendor(sc->dev);
	props->vendor_part_id = pci_get_device(sc->dev);
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = IB_MTU_2048;
	props->state = IB_PORT_ACTIVE;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	struct adapter *sc = dev->rdev.adap;

	CTR2(KTR_IW_CXGB, "%s iwch_dev %p", __FUNCTION__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, sc->port[0].hw_addr, 6);
	dev->device_cap_flags =
		(IB_DEVICE_LOCAL_DMA_LKEY |
		 IB_DEVICE_MEM_WINDOW);

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = sc->params.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = dev->rdev.adap->dev;
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;

	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;

	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;

	dev->ibdev.iwcm =
	    kmalloc(sizeof(struct iw_cm_verbs), M_NOWAIT);
	if (!dev->ibdev.iwcm)
		return (ENOMEM);

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev);
	if (ret)
		goto bail1;

	return (0);

bail1:
	cxfree(dev->ibdev.iwcm);
	return (ret);
}

void iwch_unregister_device(struct iwch_dev *dev)
{

	ib_unregister_device(&dev->ibdev);
	cxfree(dev->ibdev.iwcm);
	return;
}
#endif