/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
29178786Skmacy#include <sys/cdefs.h>
30178786Skmacy__FBSDID("$FreeBSD: stable/10/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.c 318799 2017-05-24 18:16:20Z np $");
31178786Skmacy
32237263Snp#include "opt_inet.h"
33237263Snp
34237263Snp#ifdef TCP_OFFLOAD
35178786Skmacy#include <sys/param.h>
36178786Skmacy#include <sys/systm.h>
37178786Skmacy#include <sys/kernel.h>
38178786Skmacy#include <sys/bus.h>
39178786Skmacy#include <sys/pciio.h>
40178786Skmacy#include <sys/conf.h>
41178786Skmacy#include <machine/bus.h>
42178786Skmacy#include <machine/resource.h>
43178786Skmacy#include <sys/bus_dma.h>
44178786Skmacy#include <sys/rman.h>
45178786Skmacy#include <sys/ioccom.h>
46178786Skmacy#include <sys/mbuf.h>
47178786Skmacy#include <sys/mutex.h>
48178786Skmacy#include <sys/rwlock.h>
49178786Skmacy#include <sys/linker.h>
50178786Skmacy#include <sys/firmware.h>
51178786Skmacy#include <sys/socket.h>
52178786Skmacy#include <sys/sockio.h>
53178786Skmacy#include <sys/smp.h>
54178786Skmacy#include <sys/sysctl.h>
55178786Skmacy#include <sys/syslog.h>
56178786Skmacy#include <sys/queue.h>
57178786Skmacy#include <sys/taskqueue.h>
58178786Skmacy#include <sys/proc.h>
59178786Skmacy#include <sys/queue.h>
60178786Skmacy
61178786Skmacy#include <netinet/in.h>
62178786Skmacy
63178786Skmacy
64178786Skmacy#include <vm/vm.h>
65178786Skmacy#include <vm/pmap.h>
66178786Skmacy
67237263Snp#include <rdma/ib_verbs.h>
68237263Snp#include <rdma/ib_umem.h>
69237263Snp#include <rdma/ib_user_verbs.h>
70237263Snp#include <linux/idr.h>
71237263Snp#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>
72178786Skmacy
73237263Snp
74178786Skmacy#include <cxgb_include.h>
75178786Skmacy#include <ulp/iw_cxgb/iw_cxgb_wr.h>
76178786Skmacy#include <ulp/iw_cxgb/iw_cxgb_hal.h>
77178786Skmacy#include <ulp/iw_cxgb/iw_cxgb_provider.h>
78178786Skmacy#include <ulp/iw_cxgb/iw_cxgb_cm.h>
79178786Skmacy#include <ulp/iw_cxgb/iw_cxgb.h>
80178786Skmacy#include <ulp/iw_cxgb/iw_cxgb_resource.h>
81178786Skmacy#include <ulp/iw_cxgb/iw_cxgb_user.h>
82178786Skmacy
83178786Skmacystatic int
84178786Skmacyiwch_modify_port(struct ib_device *ibdev,
85178786Skmacy			    u8 port, int port_modify_mask,
86178786Skmacy			    struct ib_port_modify *props)
87178786Skmacy{
88178786Skmacy	return (-ENOSYS);
89178786Skmacy}
90178786Skmacy
91178786Skmacystatic struct ib_ah *
92178786Skmacyiwch_ah_create(struct ib_pd *pd,
93178786Skmacy				    struct ib_ah_attr *ah_attr)
94178786Skmacy{
95178786Skmacy	return ERR_PTR(-ENOSYS);
96178786Skmacy}
97178786Skmacy
98178786Skmacystatic int
99178786Skmacyiwch_ah_destroy(struct ib_ah *ah)
100178786Skmacy{
101178786Skmacy	return (-ENOSYS);
102178786Skmacy}
103178786Skmacy
104178786Skmacystatic int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
105178786Skmacy{
106178786Skmacy	return (-ENOSYS);
107178786Skmacy}
108178786Skmacy
109178786Skmacystatic int
110178786Skmacyiwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
111178786Skmacy{
112178786Skmacy	return (-ENOSYS);
113178786Skmacy}
114178786Skmacy
115178786Skmacystatic int
116178786Skmacyiwch_process_mad(struct ib_device *ibdev,
117178786Skmacy			    int mad_flags,
118178786Skmacy			    u8 port_num,
119178786Skmacy			    struct ib_wc *in_wc,
120178786Skmacy			    struct ib_grh *in_grh,
121178786Skmacy			    struct ib_mad *in_mad, struct ib_mad *out_mad)
122178786Skmacy{
123178786Skmacy	return (-ENOSYS);
124178786Skmacy}
125178786Skmacy
/*
 * Destroy a user context: unlink and free every mmap cookie still queued
 * on the context, release the hardware ucontext state, then free the
 * wrapper itself.  Always returns 0.
 */
static int
iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	CTR2(KTR_IW_CXGB, "%s context %p", __FUNCTION__, context);
	/* _SAFE traversal: entries are removed and freed while iterating. */
	TAILQ_FOREACH_SAFE(mm, &ucontext->mmaps, entry, tmp) {
		TAILQ_REMOVE(&ucontext->mmaps, mm, entry);
		cxfree(mm);
	}
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	cxfree(ucontext);
	return 0;
}
142178786Skmacy
/*
 * Allocate a user context for a process opening the device: initialize
 * the hardware ucontext, the list used to pass mmap cookies to user
 * space, and the mutex protecting that list.
 *
 * Returns the embedded ib_ucontext on success, ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
static struct ib_ucontext *
iwch_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	context = malloc(sizeof(*context), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	TAILQ_INIT(&context->mmaps);
	mtx_init(&context->mmap_lock, "ucontext mmap", NULL, MTX_DEF);
	return &context->ibucontext;
}
158178786Skmacy
/*
 * Tear down a completion queue.  The CQID is first removed from the idr
 * so no new references can be taken; we then drop our own reference and,
 * if others remain, sleep on the chp until the last holder wakes us.
 * Finally the hardware CQ and the host-side state are destroyed.
 * Always returns 0.
 */
static int
iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	CTR2(KTR_IW_CXGB, "%s ib_cq %p", __FUNCTION__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	mtx_lock(&chp->lock);
	/* Drop our reference; wait for any remaining holders to finish. */
	if (--chp->refcnt)
		msleep(chp, &chp->lock, 0, "iwch_destroy_cq", 0);
	mtx_unlock(&chp->lock);

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	cxfree(chp);
	return 0;
}
177178786Skmacy
/*
 * Create a completion queue.
 *
 * For user CQs (ib_context != NULL) on non-T3A hardware, the userspace
 * read pointer address is pulled from the create request.  T3A parts get
 * 16 extra entries of slack because the hardware can insert additional
 * CQEs for TERMINATE and failed incoming RDMA operations; even so the CQ
 * can still overflow on T3A.  The entry count is rounded up to a power
 * of two before the hardware CQ is allocated.
 *
 * For user CQs an mmap cookie (key/addr/len) is queued on the ucontext so
 * the library can map the CQ memory; the response written back to user
 * space is sized to match the library's expectations — a downlevel
 * libcxgb3 (small udata->outlen) gets the old v0 response layout.
 *
 * Returns the embedded ib_cq, or ERR_PTR(-ENOMEM/-EFAULT) on failure.
 */
static struct ib_cq *
iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;	/* rate-limits the downlevel-library warning */
	size_t resplen;

	CTR3(KTR_IW_CXGB, "%s ib_dev %p entries %d", __FUNCTION__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = malloc(sizeof(*chp), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!chp) {
		return ERR_PTR(-ENOMEM);
	}
	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				cxfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 /*__user */*)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	/* Kernel CQs (no ucontext) get kernel-mapped queue memory. */
	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		cxfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	mtx_init(&chp->lock, "cxgb cq", NULL, MTX_DEF|MTX_DUPOK);
	chp->refcnt = 1;
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		cxfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, M_NOWAIT);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		/* Hand out a unique, page-aligned mmap key for this CQ. */
		mtx_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		mtx_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = vtophys(chp->cq.queue);
		/* Old library: v0 response, no memsize field, no +1 entry. */
               	if (udata->outlen < sizeof uresp) {
                	if (!warned++)
                        	CTR1(KTR_IW_CXGB, "%s Warning - "
                                	"downlevel libcxgb3 (non-fatal).\n",
					__func__);
                       	mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
                       				sizeof(struct t3_cqe));
                       	resplen = sizeof(struct iwch_create_cq_resp_v0);
               	} else {
                	mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
                        			sizeof(struct t3_cqe));
                       	uresp.memsize = mm->len;
                      	resplen = sizeof uresp;
               	}
              	if (ib_copy_to_udata(udata, &uresp, resplen)) {
			cxfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	CTR4(KTR_IW_CXGB, "created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
280178786Skmacy
281178786Skmacystatic int
282237263Snpiwch_resize_cq(struct ib_cq *cq __unused, int cqe __unused,
283237263Snp    struct ib_udata *udata __unused)
284178786Skmacy{
285178786Skmacy
286178786Skmacy	return (-ENOSYS);
287178786Skmacy}
288178786Skmacy
/*
 * Arm a CQ for notification.  IB_CQ_SOLICITED arms for solicited events
 * only; anything else arms for any event.  For user CQs, the current read
 * pointer is copied in from the userspace address recorded at CQ create
 * time so the hardware is armed relative to what the user has consumed.
 *
 * Returns 0 on success, -EFAULT if the user rptr copyin fails, or the
 * (negative) error from cxio_hal_cq_op.  A positive cxio result (missed
 * events) is reported only when IB_CQ_REPORT_MISSED_EVENTS is set.
 */
static int
iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		/* Refresh rptr from userspace before taking the lock. */
		if (copyin(chp->user_rptr_addr, &rptr, sizeof(rptr)))
			return (-EFAULT);
		mtx_lock(&chp->lock);
		chp->cq.rptr = rptr;
	} else
		mtx_lock(&chp->lock);
	CTR2(KTR_IW_CXGB, "%s rptr 0x%x", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	mtx_unlock(&chp->lock);
	if (err < 0)
		log(LOG_ERR, "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}
321178786Skmacy
322178786Skmacystatic int
323237263Snpiwch_mmap(struct ib_ucontext *context __unused, struct vm_area_struct *vma __unused)
324178786Skmacy{
325178786Skmacy
326237263Snp	return (-ENOSYS);
327178786Skmacy}
328178786Skmacy
329178786Skmacystatic int iwch_deallocate_pd(struct ib_pd *pd)
330178786Skmacy{
331178786Skmacy	struct iwch_dev *rhp;
332178786Skmacy	struct iwch_pd *php;
333178786Skmacy
334178786Skmacy	php = to_iwch_pd(pd);
335178786Skmacy	rhp = php->rhp;
336178786Skmacy	CTR3(KTR_IW_CXGB, "%s ibpd %p pdid 0x%x", __FUNCTION__, pd, php->pdid);
337178786Skmacy	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
338178786Skmacy	cxfree(php);
339178786Skmacy	return 0;
340178786Skmacy}
341178786Skmacy
/*
 * Allocate a protection domain: reserve a PDID from the adapter, wrap it
 * in host-side state, and — for user PDs — copy the PDID back to user
 * space so the library can reference it.
 *
 * Returns the embedded ib_pd, ERR_PTR(-EINVAL) if no PDID is available,
 * ERR_PTR(-ENOMEM) on allocation failure, or ERR_PTR(-EFAULT) if the
 * copy to user space fails (the PD is torn down in that case).
 */
static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = malloc(sizeof(*php), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	CTR3(KTR_IW_CXGB, "%s pdid 0x%0x ptr 0x%p", __FUNCTION__, pdid, php);
	return &php->ibpd;
}
371178786Skmacy
/*
 * Deregister a memory region: tear down the adapter-side TPT/PBL state,
 * free the PBL, drop the MMID from the idr, and release any kernel
 * buffer or user umem backing the region before freeing the MR itself.
 *
 * Fails with -EINVAL if any memory window is still bound to the MR
 * (usecnt != 0); otherwise returns 0.
 */
static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGB, "%s ib_mr %p", __FUNCTION__, ib_mr);
	/* There can be no memory windows */
	if (atomic_load_acq_int(&ib_mr->usecnt.counter))
		return (-EINVAL);

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;	/* MMID is the stag's index bits */
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		cxfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGB, "%s mmid 0x%x ptr %p", __FUNCTION__, mmid, mhp);
	cxfree(mhp);
	return 0;
}
398178786Skmacy
399178786Skmacystatic struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
400178786Skmacy					struct ib_phys_buf *buffer_list,
401178786Skmacy					int num_phys_buf,
402178786Skmacy					int acc,
403178786Skmacy					u64 *iova_start)
404178786Skmacy{
405178786Skmacy	__be64 *page_list;
406178786Skmacy	int shift;
407178786Skmacy	u64 total_size;
408178786Skmacy	int npages;
409178786Skmacy	struct iwch_dev *rhp;
410178786Skmacy	struct iwch_pd *php;
411178786Skmacy	struct iwch_mr *mhp;
412178786Skmacy	int ret;
413178786Skmacy
414178786Skmacy	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
415178786Skmacy	php = to_iwch_pd(pd);
416178786Skmacy	rhp = php->rhp;
417178786Skmacy
418178786Skmacy	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
419178786Skmacy	if (!mhp)
420178786Skmacy		return ERR_PTR(-ENOMEM);
421178786Skmacy
422237263Snp	mhp->rhp = rhp;
423237263Snp
424178786Skmacy	/* First check that we have enough alignment */
425178786Skmacy	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
426178786Skmacy		ret = -EINVAL;
427178786Skmacy		goto err;
428178786Skmacy	}
429178786Skmacy
430178786Skmacy	if (num_phys_buf > 1 &&
431178786Skmacy	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
432178786Skmacy		ret = -EINVAL;
433178786Skmacy		goto err;
434178786Skmacy	}
435178786Skmacy
436178786Skmacy	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
437178786Skmacy				   &total_size, &npages, &shift, &page_list);
438178786Skmacy	if (ret)
439178786Skmacy		goto err;
440178786Skmacy
441237263Snp	ret = iwch_alloc_pbl(mhp, npages);
442237263Snp	if (ret) {
443237263Snp		cxfree(page_list);
444237263Snp		goto err_pbl;
445237263Snp	}
446237263Snp
447237263Snp	ret = iwch_write_pbl(mhp, page_list, npages, 0);
448237263Snp	cxfree(page_list);
449237263Snp	if (ret)
450237263Snp		goto err;
451237263Snp
452178786Skmacy	mhp->attr.pdid = php->pdid;
453178786Skmacy	mhp->attr.zbva = 0;
454178786Skmacy
455178786Skmacy	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
456178786Skmacy	mhp->attr.va_fbo = *iova_start;
457178786Skmacy	mhp->attr.page_size = shift - 12;
458178786Skmacy
459178786Skmacy	mhp->attr.len = (u32) total_size;
460178786Skmacy	mhp->attr.pbl_size = npages;
461237263Snp	ret = iwch_register_mem(rhp, php, mhp, shift);
462237263Snp	if (ret)
463237263Snp		goto err_pbl;
464237263Snp
465178786Skmacy	return &mhp->ibmr;
466237263Snp
467237263Snperr_pbl:
468237263Snp	iwch_free_pbl(mhp);
469237263Snp
470178786Skmacyerr:
471178786Skmacy	cxfree(mhp);
472237263Snp	return ERR_PTR(ret);
473178786Skmacy
474178786Skmacy}
475178786Skmacy
/*
 * Re-register a physical memory region, changing its PD, access rights,
 * and/or translation according to mr_rereg_mask.  The change is staged
 * in a stack copy (mh) and pushed to the adapter first; the live mhp is
 * only updated after the hardware update succeeds, so a failure leaves
 * the original registration intact.
 *
 * Fails with -EINVAL if a memory window is still bound (usecnt != 0) or
 * if the new PD is on a different adapter.  Otherwise returns 0 or the
 * error from building the page list / updating the adapter.
 */
static int iwch_reregister_phys_mem(struct ib_mr *mr,
				     int mr_rereg_mask,
				     struct ib_pd *pd,
	                             struct ib_phys_buf *buffer_list,
	                             int num_phys_buf,
	                             int acc, u64 * iova_start)
{

	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages = 0;
	int ret;

	CTR3(KTR_IW_CXGB, "%s ib_mr %p ib_pd %p", __FUNCTION__, mr, pd);

	/* There can be no memory windows */
	if (atomic_load_acq_int(&mr->usecnt.counter))
		return (-EINVAL);

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return (-EINVAL);

	/* Stage all changes in a copy so failures don't corrupt mhp. */
	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
	cxfree(page_list);
	if (ret) {
		return ret;
	}
	/* Hardware update succeeded; commit the changes to mhp. */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}
541178786Skmacy
542178786Skmacy
543178786Skmacystatic struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
544255932Salfred				      u64 virt, int acc, struct ib_udata *udata,
545255932Salfred				      int mr_id)
546178786Skmacy{
547178786Skmacy	__be64 *pages;
548178786Skmacy	int shift, i, n;
549178786Skmacy	int err = 0;
550178786Skmacy	struct ib_umem_chunk *chunk;
551178786Skmacy	struct iwch_dev *rhp;
552178786Skmacy	struct iwch_pd *php;
553178786Skmacy	struct iwch_mr *mhp;
554178786Skmacy	struct iwch_reg_user_mr_resp uresp;
555178786Skmacy#ifdef notyet
556178786Skmacy	int j, k, len;
557178786Skmacy#endif
558178786Skmacy
559178786Skmacy	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
560178786Skmacy
561178786Skmacy	php = to_iwch_pd(pd);
562178786Skmacy	rhp = php->rhp;
563178786Skmacy	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_NOWAIT|M_ZERO);
564178786Skmacy	if (!mhp)
565178786Skmacy		return ERR_PTR(-ENOMEM);
566178786Skmacy
567237263Snp	mhp->rhp = rhp;
568237263Snp
569237263Snp	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
570178786Skmacy	if (IS_ERR(mhp->umem)) {
571178786Skmacy		err = PTR_ERR(mhp->umem);
572178786Skmacy		cxfree(mhp);
573178786Skmacy		return ERR_PTR(-err);
574178786Skmacy	}
575178786Skmacy
576178786Skmacy	shift = ffs(mhp->umem->page_size) - 1;
577178786Skmacy
578178786Skmacy	n = 0;
579237263Snp	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
580178786Skmacy		n += chunk->nents;
581178786Skmacy
582237263Snp	err = iwch_alloc_pbl(mhp, n);
583237263Snp	if (err)
584237263Snp		goto err;
585237263Snp
586237263Snp	pages = (__be64 *) kmalloc(n * sizeof(u64), M_NOWAIT);
587178786Skmacy	if (!pages) {
588178786Skmacy		err = -ENOMEM;
589237263Snp		goto err_pbl;
590178786Skmacy	}
591178786Skmacy
592178786Skmacy	i = n = 0;
593178786Skmacy
594237263Snp#ifdef notyet
595178786Skmacy	TAILQ_FOREACH(chunk, &mhp->umem->chunk_list, entry)
596178786Skmacy		for (j = 0; j < chunk->nmap; ++j) {
597178786Skmacy			len = sg_dma_len(&chunk->page_list[j]) >> shift;
598178786Skmacy			for (k = 0; k < len; ++k) {
599178786Skmacy				pages[i++] = htobe64(sg_dma_address(
600178786Skmacy					&chunk->page_list[j]) +
601178786Skmacy					mhp->umem->page_size * k);
602237263Snp				if (i == PAGE_SIZE / sizeof *pages) {
603237263Snp					err = iwch_write_pbl(mhp, pages, i, n);
604237263Snp					if (err)
605237263Snp						goto pbl_done;
606237263Snp					n += i;
607237263Snp					i = 0;
608237263Snp				}
609178786Skmacy			}
610178786Skmacy		}
611178786Skmacy#endif
612237263Snp
613237263Snp	if (i)
614237263Snp		err = iwch_write_pbl(mhp, pages, i, n);
615237263Snp#ifdef notyet
616237263Snppbl_done:
617237263Snp#endif
618237263Snp	cxfree(pages);
619237263Snp	if (err)
620237263Snp		goto err_pbl;
621237263Snp
622178786Skmacy	mhp->attr.pdid = php->pdid;
623178786Skmacy	mhp->attr.zbva = 0;
624178786Skmacy	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
625178786Skmacy	mhp->attr.va_fbo = virt;
626178786Skmacy	mhp->attr.page_size = shift - 12;
627178786Skmacy	mhp->attr.len = (u32) length;
628237263Snp
629237263Snp	err = iwch_register_mem(rhp, php, mhp, shift);
630178786Skmacy	if (err)
631237263Snp		goto err_pbl;
632178786Skmacy
633178786Skmacy	if (udata && !t3a_device(rhp)) {
634178786Skmacy		uresp.pbl_addr = (mhp->attr.pbl_addr -
635178786Skmacy	                         rhp->rdev.rnic_info.pbl_base) >> 3;
636178786Skmacy		CTR2(KTR_IW_CXGB, "%s user resp pbl_addr 0x%x", __FUNCTION__,
637178786Skmacy		     uresp.pbl_addr);
638178786Skmacy
639178786Skmacy		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
640178786Skmacy			iwch_dereg_mr(&mhp->ibmr);
641178786Skmacy			err = EFAULT;
642178786Skmacy			goto err;
643178786Skmacy		}
644178786Skmacy	}
645178786Skmacy
646178786Skmacy	return &mhp->ibmr;
647178786Skmacy
648237263Snperr_pbl:
649237263Snp	iwch_free_pbl(mhp);
650237263Snp
651178786Skmacyerr:
652178786Skmacy	ib_umem_release(mhp->umem);
653178786Skmacy	cxfree(mhp);
654178786Skmacy	return ERR_PTR(-err);
655178786Skmacy}
656178786Skmacy
657178786Skmacystatic struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
658178786Skmacy{
659178786Skmacy	struct ib_phys_buf bl;
660178786Skmacy	u64 kva;
661178786Skmacy	struct ib_mr *ibmr;
662178786Skmacy
663178786Skmacy	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
664178786Skmacy
665178786Skmacy	/*
666178786Skmacy	 * T3 only supports 32 bits of size.
667178786Skmacy	 */
668178786Skmacy	bl.size = 0xffffffff;
669178786Skmacy	bl.addr = 0;
670178786Skmacy	kva = 0;
671178786Skmacy	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
672178786Skmacy	return ibmr;
673178786Skmacy}
674178786Skmacy
/*
 * Allocate a memory window: get a window stag from the adapter, record
 * the host-side state, and publish the window under its MMID (the
 * stag's index bits).  The stag doubles as the window's rkey.
 *
 * Returns the embedded ib_mw, or ERR_PTR(-ENOMEM) / the negated
 * cxio_allocate_window() error on failure.
 */
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		cxfree(mhp);
		return ERR_PTR(-ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		/* idr insertion failed; give the window back. */
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		cxfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGB, "%s mmid 0x%x mhp %p stag 0x%x", __FUNCTION__, mmid, mhp, stag);
	return &(mhp->ibmw);
}
708178786Skmacy
709178786Skmacystatic int iwch_dealloc_mw(struct ib_mw *mw)
710178786Skmacy{
711178786Skmacy	struct iwch_dev *rhp;
712178786Skmacy	struct iwch_mw *mhp;
713178786Skmacy	u32 mmid;
714178786Skmacy
715178786Skmacy	mhp = to_iwch_mw(mw);
716178786Skmacy	rhp = mhp->rhp;
717178786Skmacy	mmid = (mw->rkey) >> 8;
718178786Skmacy	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
719178786Skmacy	remove_handle(rhp, &rhp->mmidr, mmid);
720178786Skmacy	cxfree(mhp);
721178786Skmacy	CTR4(KTR_IW_CXGB, "%s ib_mw %p mmid 0x%x ptr %p", __FUNCTION__, mw, mmid, mhp);
722178786Skmacy	return 0;
723178786Skmacy}
724178786Skmacy
/*
 * Destroy a queue pair.  The QP is first moved to ERROR so outstanding
 * work flushes, then we wait for any attached endpoint to detach and for
 * all other references to drop before destroying the hardware WQ.  User
 * QPs release their resources against the owning ucontext; kernel QPs
 * against the device's default context.  Always returns 0.
 */
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	/* Wait for the connection endpoint (if any) to let go of the QP. */
	mtx_lock(&qhp->lock);
	if (qhp->ep)
		msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp1", 0);
	mtx_unlock(&qhp->lock);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	/* Drop our reference; sleep until all other holders release theirs. */
	mtx_lock(&qhp->lock);
	if (--qhp->refcnt)
		msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp2", 0);
	mtx_unlock(&qhp->lock);

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	CTR4(KTR_IW_CXGB, "%s ib_qp %p qpid 0x%0x qhp %p", __FUNCTION__,
	     ib_qp, qhp->wq.qpid, qhp);
	cxfree(qhp);
	return 0;
}
759178786Skmacy
760178786Skmacystatic struct ib_qp *iwch_create_qp(struct ib_pd *pd,
761178786Skmacy			     struct ib_qp_init_attr *attrs,
762178786Skmacy			     struct ib_udata *udata)
763178786Skmacy{
764178786Skmacy	struct iwch_dev *rhp;
765178786Skmacy	struct iwch_qp *qhp;
766178786Skmacy	struct iwch_pd *php;
767178786Skmacy	struct iwch_cq *schp;
768178786Skmacy	struct iwch_cq *rchp;
769178786Skmacy	struct iwch_create_qp_resp uresp;
770178786Skmacy	int wqsize, sqsize, rqsize;
771178786Skmacy	struct iwch_ucontext *ucontext;
772178786Skmacy
773178786Skmacy	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
774178786Skmacy	if (attrs->qp_type != IB_QPT_RC)
775178786Skmacy		return ERR_PTR(-EINVAL);
776178786Skmacy	php = to_iwch_pd(pd);
777178786Skmacy	rhp = php->rhp;
778178786Skmacy	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
779178786Skmacy	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
780178786Skmacy	if (!schp || !rchp)
781178786Skmacy		return ERR_PTR(-EINVAL);
782178786Skmacy
783178786Skmacy	/* The RQT size must be # of entries + 1 rounded up to a power of two */
784178786Skmacy	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
785178786Skmacy	if (rqsize == attrs->cap.max_recv_wr)
786178786Skmacy		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);
787178786Skmacy
788178786Skmacy	/* T3 doesn't support RQT depth < 16 */
789178786Skmacy	if (rqsize < 16)
790178786Skmacy		rqsize = 16;
791178786Skmacy
792178786Skmacy	if (rqsize > T3_MAX_RQ_SIZE)
793178786Skmacy		return ERR_PTR(-EINVAL);
794178786Skmacy
795178786Skmacy	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
796178786Skmacy		return ERR_PTR(-EINVAL);
797178786Skmacy
798178786Skmacy	/*
799178786Skmacy	 * NOTE: The SQ and total WQ sizes don't need to be
800178786Skmacy	 * a power of two.  However, all the code assumes
801178786Skmacy	 * they are. EG: Q_FREECNT() and friends.
802178786Skmacy	 */
803178786Skmacy	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
804178786Skmacy	wqsize = roundup_pow_of_two(rqsize + sqsize);
805178786Skmacy	CTR4(KTR_IW_CXGB, "%s wqsize %d sqsize %d rqsize %d", __FUNCTION__,
806178786Skmacy	     wqsize, sqsize, rqsize);
807178786Skmacy	qhp = malloc(sizeof(*qhp), M_DEVBUF, M_ZERO|M_NOWAIT);
808178786Skmacy	if (!qhp)
809178786Skmacy		return ERR_PTR(-ENOMEM);
810178786Skmacy	qhp->wq.size_log2 = ilog2(wqsize);
811178786Skmacy	qhp->wq.rq_size_log2 = ilog2(rqsize);
812178786Skmacy	qhp->wq.sq_size_log2 = ilog2(sqsize);
813178786Skmacy	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
814178786Skmacy	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
815178786Skmacy			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
816178786Skmacy		cxfree(qhp);
817178786Skmacy		return ERR_PTR(-ENOMEM);
818178786Skmacy	}
819178786Skmacy
820178786Skmacy	attrs->cap.max_recv_wr = rqsize - 1;
821178786Skmacy	attrs->cap.max_send_wr = sqsize;
822178786Skmacy	attrs->cap.max_inline_data = T3_MAX_INLINE;
823178786Skmacy
824178786Skmacy	qhp->rhp = rhp;
825178786Skmacy	qhp->attr.pd = php->pdid;
826178786Skmacy	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
827178786Skmacy	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
828178786Skmacy	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
829178786Skmacy	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
830178786Skmacy	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
831178786Skmacy	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
832178786Skmacy	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
833178786Skmacy	qhp->attr.state = IWCH_QP_STATE_IDLE;
834178786Skmacy	qhp->attr.next_state = IWCH_QP_STATE_IDLE;
835178786Skmacy
836178786Skmacy	/*
837178786Skmacy	 * XXX - These don't get passed in from the openib user
838178786Skmacy	 * at create time.  The CM sets them via a QP modify.
839178786Skmacy	 * Need to fix...  I think the CM should
840178786Skmacy	 */
841178786Skmacy	qhp->attr.enable_rdma_read = 1;
842178786Skmacy	qhp->attr.enable_rdma_write = 1;
843178786Skmacy	qhp->attr.enable_bind = 1;
844178786Skmacy	qhp->attr.max_ord = 1;
845178786Skmacy	qhp->attr.max_ird = 1;
846178786Skmacy
847178786Skmacy	mtx_init(&qhp->lock, "cxgb qp", NULL, MTX_DEF|MTX_DUPOK);
848178786Skmacy	qhp->refcnt = 1;
849178786Skmacy
850237263Snp	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
851237263Snp		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
852237263Snp			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
853237263Snp		cxfree(qhp);
854237263Snp		return ERR_PTR(-ENOMEM);
855237263Snp	}
856237263Snp
857178786Skmacy	if (udata) {
858178786Skmacy
859178786Skmacy		struct iwch_mm_entry *mm1, *mm2;
860178786Skmacy
861178786Skmacy		mm1 = kmalloc(sizeof *mm1, M_NOWAIT);
862178786Skmacy		if (!mm1) {
863178786Skmacy			iwch_destroy_qp(&qhp->ibqp);
864178786Skmacy			return ERR_PTR(-ENOMEM);
865178786Skmacy		}
866178786Skmacy
867178786Skmacy		mm2 = kmalloc(sizeof *mm2, M_NOWAIT);
868178786Skmacy		if (!mm2) {
869178786Skmacy			cxfree(mm1);
870178786Skmacy			iwch_destroy_qp(&qhp->ibqp);
871178786Skmacy			return ERR_PTR(-ENOMEM);
872178786Skmacy		}
873178786Skmacy
874178786Skmacy		uresp.qpid = qhp->wq.qpid;
875178786Skmacy		uresp.size_log2 = qhp->wq.size_log2;
876178786Skmacy		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
877178786Skmacy		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
878178786Skmacy		mtx_lock(&ucontext->mmap_lock);
879178786Skmacy		uresp.key = ucontext->key;
880178786Skmacy		ucontext->key += PAGE_SIZE;
881178786Skmacy		uresp.db_key = ucontext->key;
882178786Skmacy		ucontext->key += PAGE_SIZE;
883178786Skmacy		mtx_unlock(&ucontext->mmap_lock);
884178786Skmacy		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
885178786Skmacy			cxfree(mm1);
886178786Skmacy			cxfree(mm2);
887178786Skmacy			iwch_destroy_qp(&qhp->ibqp);
888178786Skmacy			return ERR_PTR(-EFAULT);
889178786Skmacy		}
890178786Skmacy		mm1->key = uresp.key;
891178786Skmacy		mm1->addr = vtophys(qhp->wq.queue);
892178786Skmacy		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
893178786Skmacy		insert_mmap(ucontext, mm1);
894178786Skmacy		mm2->key = uresp.db_key;
895178786Skmacy		mm2->addr = qhp->wq.udb & PAGE_MASK;
896178786Skmacy		mm2->len = PAGE_SIZE;
897178786Skmacy		insert_mmap(ucontext, mm2);
898178786Skmacy	}
899178786Skmacy	qhp->ibqp.qp_num = qhp->wq.qpid;
900178786Skmacy	callout_init(&(qhp->timer), TRUE);
901178786Skmacy	CTR6(KTR_IW_CXGB, "sq_num_entries %d, rq_num_entries %d "
902178786Skmacy	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d",
903178786Skmacy	     qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
904178786Skmacy	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
905178786Skmacy	     1 << qhp->wq.size_log2);
906178786Skmacy	return &qhp->ibqp;
907178786Skmacy}
908178786Skmacy
909178786Skmacystatic int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
910178786Skmacy		      int attr_mask, struct ib_udata *udata)
911178786Skmacy{
912178786Skmacy	struct iwch_dev *rhp;
913178786Skmacy	struct iwch_qp *qhp;
914178786Skmacy	enum iwch_qp_attr_mask mask = 0;
915178786Skmacy	struct iwch_qp_attributes attrs;
916178786Skmacy
917178786Skmacy	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, ibqp);
918178786Skmacy
919178786Skmacy	/* iwarp does not support the RTR state */
920178786Skmacy	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
921178786Skmacy		attr_mask &= ~IB_QP_STATE;
922178786Skmacy
923178786Skmacy	/* Make sure we still have something left to do */
924178786Skmacy	if (!attr_mask)
925178786Skmacy		return 0;
926178786Skmacy
927178786Skmacy	memset(&attrs, 0, sizeof attrs);
928178786Skmacy	qhp = to_iwch_qp(ibqp);
929178786Skmacy	rhp = qhp->rhp;
930178786Skmacy
931178786Skmacy	attrs.next_state = iwch_convert_state(attr->qp_state);
932178786Skmacy	attrs.enable_rdma_read = (attr->qp_access_flags &
933178786Skmacy			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
934178786Skmacy	attrs.enable_rdma_write = (attr->qp_access_flags &
935178786Skmacy				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
936178786Skmacy	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
937178786Skmacy
938178786Skmacy
939178786Skmacy	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
940178786Skmacy	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
941178786Skmacy			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
942178786Skmacy			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
943178786Skmacy			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;
944178786Skmacy
945178786Skmacy	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
946178786Skmacy}
947178786Skmacy
948178786Skmacyvoid iwch_qp_add_ref(struct ib_qp *qp)
949178786Skmacy{
950178786Skmacy	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
951178786Skmacy	mtx_lock(&to_iwch_qp(qp)->lock);
952178786Skmacy	to_iwch_qp(qp)->refcnt++;
953178786Skmacy	mtx_unlock(&to_iwch_qp(qp)->lock);
954178786Skmacy}
955178786Skmacy
956178786Skmacyvoid iwch_qp_rem_ref(struct ib_qp *qp)
957178786Skmacy{
958178786Skmacy	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
959178786Skmacy	mtx_lock(&to_iwch_qp(qp)->lock);
960178786Skmacy	if (--to_iwch_qp(qp)->refcnt == 0)
961178786Skmacy	        wakeup(to_iwch_qp(qp));
962178786Skmacy	mtx_unlock(&to_iwch_qp(qp)->lock);
963178786Skmacy}
964178786Skmacy
965178786Skmacystatic struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
966178786Skmacy{
967178786Skmacy	CTR3(KTR_IW_CXGB, "%s ib_dev %p qpn 0x%x", __FUNCTION__, dev, qpn);
968178786Skmacy	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
969178786Skmacy}
970178786Skmacy
971178786Skmacy
972178786Skmacystatic int iwch_query_pkey(struct ib_device *ibdev,
973178786Skmacy			   u8 port, u16 index, u16 * pkey)
974178786Skmacy{
975178786Skmacy	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
976178786Skmacy	*pkey = 0;
977178786Skmacy	return 0;
978178786Skmacy}
979178786Skmacy
980178786Skmacystatic int iwch_query_gid(struct ib_device *ibdev, u8 port,
981178786Skmacy			  int index, union ib_gid *gid)
982178786Skmacy{
983178786Skmacy	struct iwch_dev *dev;
984178786Skmacy	struct port_info *pi;
985237263Snp	struct adapter *sc;
986178786Skmacy
987178786Skmacy	CTR5(KTR_IW_CXGB, "%s ibdev %p, port %d, index %d, gid %p",
988178786Skmacy	       __FUNCTION__, ibdev, port, index, gid);
989178786Skmacy	dev = to_iwch_dev(ibdev);
990237263Snp	sc = dev->rdev.adap;
991178786Skmacy	PANIC_IF(port == 0 || port > 2);
992237263Snp	pi = &sc->port[port - 1];
993178786Skmacy	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
994178786Skmacy	memcpy(&(gid->raw[0]), pi->hw_addr, 6);
995178786Skmacy	return 0;
996178786Skmacy}
997178786Skmacy
998178786Skmacystatic int iwch_query_device(struct ib_device *ibdev,
999178786Skmacy			     struct ib_device_attr *props)
1000178786Skmacy{
1001237263Snp	struct iwch_dev *dev;
1002237263Snp	struct adapter *sc;
1003178786Skmacy
1004178786Skmacy	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
1005178786Skmacy
1006178786Skmacy	dev = to_iwch_dev(ibdev);
1007237263Snp	sc = dev->rdev.adap;
1008178786Skmacy	memset(props, 0, sizeof *props);
1009237263Snp	memcpy(&props->sys_image_guid, sc->port[0].hw_addr, 6);
1010178786Skmacy	props->device_cap_flags = dev->device_cap_flags;
1011237263Snp	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
1012237263Snp	props->vendor_id = pci_get_vendor(sc->dev);
1013237263Snp	props->vendor_part_id = pci_get_device(sc->dev);
1014237263Snp	props->max_mr_size = dev->attr.max_mr_size;
1015178786Skmacy	props->max_qp = dev->attr.max_qps;
1016178786Skmacy	props->max_qp_wr = dev->attr.max_wrs;
1017178786Skmacy	props->max_sge = dev->attr.max_sge_per_wr;
1018178786Skmacy	props->max_sge_rd = 1;
1019178786Skmacy	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
1020178786Skmacy	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
1021178786Skmacy	props->max_cq = dev->attr.max_cqs;
1022178786Skmacy	props->max_cqe = dev->attr.max_cqes_per_cq;
1023178786Skmacy	props->max_mr = dev->attr.max_mem_regs;
1024178786Skmacy	props->max_pd = dev->attr.max_pds;
1025178786Skmacy	props->local_ca_ack_delay = 0;
1026178786Skmacy
1027178786Skmacy	return 0;
1028178786Skmacy}
1029178786Skmacy
1030178786Skmacystatic int iwch_query_port(struct ib_device *ibdev,
1031178786Skmacy			   u8 port, struct ib_port_attr *props)
1032178786Skmacy{
1033178786Skmacy	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
1034237263Snp	memset(props, 0, sizeof(struct ib_port_attr));
1035178786Skmacy	props->max_mtu = IB_MTU_4096;
1036237263Snp	props->active_mtu = IB_MTU_2048;
1037178786Skmacy	props->state = IB_PORT_ACTIVE;
1038178786Skmacy	props->port_cap_flags =
1039178786Skmacy	    IB_PORT_CM_SUP |
1040178786Skmacy	    IB_PORT_SNMP_TUNNEL_SUP |
1041178786Skmacy	    IB_PORT_REINIT_SUP |
1042178786Skmacy	    IB_PORT_DEVICE_MGMT_SUP |
1043178786Skmacy	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
1044178786Skmacy	props->gid_tbl_len = 1;
1045178786Skmacy	props->pkey_tbl_len = 1;
1046178786Skmacy	props->active_width = 2;
1047178786Skmacy	props->active_speed = 2;
1048178786Skmacy	props->max_msg_sz = -1;
1049178786Skmacy
1050178786Skmacy	return 0;
1051178786Skmacy}
1052178786Skmacy
/*
 * Register the T3 RNIC with the ib core: fill in the ib_device
 * identity fields and verbs callback table, allocate and populate the
 * iWARP CM verbs structure, then call ib_register_device().
 * Returns 0 on success or a positive errno on failure; on failure no
 * resources remain allocated.
 */
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	struct adapter *sc = dev->rdev.adap;

	CTR2(KTR_IW_CXGB, "%s iwch_dev %p", __FUNCTION__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	/* The node GUID is port 0's 6-byte MAC address, zero-padded. */
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, sc->port[0].hw_addr, 6);
	dev->device_cap_flags =
		(IB_DEVICE_LOCAL_DMA_LKEY |
		 IB_DEVICE_MEM_WINDOW);

	/* Userspace verbs commands this provider supports. */
	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = sc->params.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = NULL;
	/* Kernel verbs callback table. */
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;

	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;

	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;

	/* iWARP connection-manager callback table; freed on any error. */
	dev->ibdev.iwcm =
	    kmalloc(sizeof(struct iw_cm_verbs), M_NOWAIT);
	if (!dev->ibdev.iwcm)
		return (ENOMEM);

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen_ep = iwch_create_listen_ep;
	dev->ibdev.iwcm->destroy_listen_ep = iwch_destroy_listen_ep;
	dev->ibdev.iwcm->newconn = process_newconn;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	return (0);

bail1:
	/* Undo the only allocation made above. */
	cxfree(dev->ibdev.iwcm);
	return (ret);
}
1151178786Skmacy
1152178786Skmacyvoid iwch_unregister_device(struct iwch_dev *dev)
1153178786Skmacy{
1154178786Skmacy
1155178786Skmacy	ib_unregister_device(&dev->ibdev);
1156237263Snp	cxfree(dev->ibdev.iwcm);
1157178786Skmacy	return;
1158178786Skmacy}
1159237263Snp#endif
1160