/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.c 318798 2017-05-24 18:14:57Z np $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pciio.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <netinet/in.h>


#include <vm/vm.h>
#include <vm/pmap.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <linux/idr.h>
#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>


#include <cxgb_include.h>
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
#include <ulp/iw_cxgb/iw_cxgb.h>
#include <ulp/iw_cxgb/iw_cxgb_resource.h>
#include <ulp/iw_cxgb/iw_cxgb_user.h>

static int
iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return (-ENOSYS);
}

static struct ib_ah *
iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int
iwch_ah_destroy(struct ib_ah *ah)
{
	return (-ENOSYS);
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return (-ENOSYS);
}

static int
iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return (-ENOSYS);
}

static int
iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return (-ENOSYS);
}

static int
iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	CTR2(KTR_IW_CXGB, "%s context %p", __FUNCTION__, context);
	TAILQ_FOREACH_SAFE(mm, &ucontext->mmaps, entry, tmp) {
		TAILQ_REMOVE(&ucontext->mmaps, mm, entry);
		cxfree(mm);
	}
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	cxfree(ucontext);
	return 0;
}

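/*
 * Allocate a per-process user context.  It carries the list of pending
 * mmap keys handed back to the user library (libcxgb3), the mutex that
 * protects that list, and the HAL's per-context state initialized by
 * cxio_init_ucontext().
 */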
static struct ib_ucontext *
iwch_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	context = malloc(sizeof(*context), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	TAILQ_INIT(&context->mmaps);
	mtx_init(&context->mmap_lock, "ucontext mmap", NULL, MTX_DEF);
	return &context->ibucontext;
}

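/*
 * Destroy a CQ.  Remove the cqidr handle first so no new references can be
 * taken, then wait under chp->lock for the reference count to drop before
 * tearing down the hardware CQ and freeing the software state.
 */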
static int
iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	CTR2(KTR_IW_CXGB, "%s ib_cq %p", __FUNCTION__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	mtx_lock(&chp->lock);
	if (--chp->refcnt)
		msleep(chp, &chp->lock, 0, "iwch_destroy_cq", 0);
	mtx_unlock(&chp->lock);

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	cxfree(chp);
	return 0;
}

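/*
 * Create a CQ.  The requested depth comes in through attr->cqe and, for
 * user CQs on non-T3A devices, the user's read-pointer address arrives in
 * the udata request.  T3A gets extra slack CQEs instead (see below).  On
 * success an mmap key covering the queue memory is returned through the
 * response; if the caller's response buffer is too small for the current
 * struct, the older v0 response layout is assumed (downlevel libcxgb3).
 */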
178178786Skmacystatic struct ib_cq *
179278886Shselaskyiwch_create_cq(struct ib_device *ibdev, struct ib_cq_init_attr *attr,
180178786Skmacy			     struct ib_ucontext *ib_context,
181178786Skmacy			     struct ib_udata *udata)
182178786Skmacy{
183178786Skmacy	struct iwch_dev *rhp;
184178786Skmacy	struct iwch_cq *chp;
185178786Skmacy	struct iwch_create_cq_resp uresp;
186178786Skmacy	struct iwch_create_cq_req ureq;
187178786Skmacy	struct iwch_ucontext *ucontext = NULL;
188237263Snp	static int warned;
189237263Snp	size_t resplen;
190278886Shselasky	int entries = attr->cqe;
191178786Skmacy
192178786Skmacy	CTR3(KTR_IW_CXGB, "%s ib_dev %p entries %d", __FUNCTION__, ibdev, entries);
193178786Skmacy	rhp = to_iwch_dev(ibdev);
194178786Skmacy	chp = malloc(sizeof(*chp), M_DEVBUF, M_NOWAIT|M_ZERO);
195178786Skmacy	if (!chp) {
196178786Skmacy		return ERR_PTR(-ENOMEM);
197178786Skmacy	}
198178786Skmacy	if (ib_context) {
199178786Skmacy		ucontext = to_iwch_ucontext(ib_context);
200178786Skmacy		if (!t3a_device(rhp)) {
201178786Skmacy			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
202178786Skmacy				cxfree(chp);
203178786Skmacy				return ERR_PTR(-EFAULT);
204178786Skmacy			}
205178786Skmacy			chp->user_rptr_addr = (u32 /*__user */*)(unsigned long)ureq.user_rptr_addr;
206178786Skmacy		}
207178786Skmacy	}
208178786Skmacy
209178786Skmacy	if (t3a_device(rhp)) {
210178786Skmacy
211178786Skmacy		/*
212178786Skmacy		 * T3A: Add some fluff to handle extra CQEs inserted
213178786Skmacy		 * for various errors.
214178786Skmacy		 * Additional CQE possibilities:
215178786Skmacy		 *      TERMINATE,
216178786Skmacy		 *      incoming RDMA WRITE Failures
217178786Skmacy		 *      incoming RDMA READ REQUEST FAILUREs
218178786Skmacy		 * NOTE: We cannot ensure the CQ won't overflow.
219178786Skmacy		 */
220178786Skmacy		entries += 16;
221178786Skmacy	}
222178786Skmacy	entries = roundup_pow_of_two(entries);
223178786Skmacy	chp->cq.size_log2 = ilog2(entries);
224178786Skmacy
225237263Snp	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
226178786Skmacy		cxfree(chp);
227178786Skmacy		return ERR_PTR(-ENOMEM);
228178786Skmacy	}
229178786Skmacy	chp->rhp = rhp;
230178786Skmacy	chp->ibcq.cqe = 1 << chp->cq.size_log2;
231178786Skmacy	mtx_init(&chp->lock, "cxgb cq", NULL, MTX_DEF|MTX_DUPOK);
232178786Skmacy	chp->refcnt = 1;
233237263Snp	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
234237263Snp		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
235237263Snp		cxfree(chp);
236237263Snp		return ERR_PTR(-ENOMEM);
237237263Snp	}
238178786Skmacy
239178786Skmacy	if (ucontext) {
240178786Skmacy		struct iwch_mm_entry *mm;
241178786Skmacy
242178786Skmacy		mm = kmalloc(sizeof *mm, M_NOWAIT);
243178786Skmacy		if (!mm) {
244178786Skmacy			iwch_destroy_cq(&chp->ibcq);
245178786Skmacy			return ERR_PTR(-ENOMEM);
246178786Skmacy		}
247178786Skmacy		uresp.cqid = chp->cq.cqid;
248178786Skmacy		uresp.size_log2 = chp->cq.size_log2;
249178786Skmacy		mtx_lock(&ucontext->mmap_lock);
250178786Skmacy		uresp.key = ucontext->key;
251178786Skmacy		ucontext->key += PAGE_SIZE;
252178786Skmacy		mtx_unlock(&ucontext->mmap_lock);
253237263Snp		mm->key = uresp.key;
254237263Snp		mm->addr = vtophys(chp->cq.queue);
255237263Snp               	if (udata->outlen < sizeof uresp) {
256237263Snp                	if (!warned++)
257237263Snp                        	CTR1(KTR_IW_CXGB, "%s Warning - "
258237263Snp                                	"downlevel libcxgb3 (non-fatal).\n",
259237263Snp					__func__);
260237263Snp                       	mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
261237263Snp                       				sizeof(struct t3_cqe));
262237263Snp                       	resplen = sizeof(struct iwch_create_cq_resp_v0);
263237263Snp               	} else {
264237263Snp                	mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
265237263Snp                        			sizeof(struct t3_cqe));
266237263Snp                       	uresp.memsize = mm->len;
267237263Snp                      	resplen = sizeof uresp;
268237263Snp               	}
269237263Snp              	if (ib_copy_to_udata(udata, &uresp, resplen)) {
270178786Skmacy			cxfree(mm);
271178786Skmacy			iwch_destroy_cq(&chp->ibcq);
272178786Skmacy			return ERR_PTR(-EFAULT);
273178786Skmacy		}
274178786Skmacy		insert_mmap(ucontext, mm);
275178786Skmacy	}
276178786Skmacy	CTR4(KTR_IW_CXGB, "created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx",
277178786Skmacy	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
278178786Skmacy	     (unsigned long long) chp->cq.dma_addr);
279178786Skmacy	return &chp->ibcq;
280178786Skmacy}
281178786Skmacy
282178786Skmacystatic int
283237263Snpiwch_resize_cq(struct ib_cq *cq __unused, int cqe __unused,
284237263Snp    struct ib_udata *udata __unused)
285178786Skmacy{
286178786Skmacy
287178786Skmacy	return (-ENOSYS);
288178786Skmacy}
289178786Skmacy
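/*
 * Re-arm a CQ for notification, solicited-only or any-event.  If the CQ
 * belongs to a user process, the current read pointer is copied in from
 * userspace before issuing the CQ op so the hardware arms relative to what
 * the user has actually polled.
 */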
static int
iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (copyin(chp->user_rptr_addr, &rptr, sizeof(rptr)))
			return (-EFAULT);
		mtx_lock(&chp->lock);
		chp->cq.rptr = rptr;
	} else
		mtx_lock(&chp->lock);
	CTR2(KTR_IW_CXGB, "%s rptr 0x%x", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	mtx_unlock(&chp->lock);
	if (err < 0)
		log(LOG_ERR, "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

static int
iwch_mmap(struct ib_ucontext *context __unused, struct vm_area_struct *vma __unused)
{

	return (-ENOSYS);
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	CTR3(KTR_IW_CXGB, "%s ibpd %p pdid 0x%x", __FUNCTION__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	cxfree(php);
	return 0;
}

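/*
 * Allocate a protection domain: take a PDID from the HAL resource pool and,
 * for user PDs, copy the PDID back through udata so the library can use it.
 */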
static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = malloc(sizeof(*php), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	CTR3(KTR_IW_CXGB, "%s pdid 0x%0x ptr 0x%p", __FUNCTION__, pdid, php);
	return &php->ibpd;
}

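/*
 * Deregister an MR.  No memory windows may still be bound (usecnt must be
 * zero); the TPT entry and PBL are released, the mmid handle removed, and
 * any kernel buffer or umem backing the region freed.
 */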
static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGB, "%s ib_mr %p", __FUNCTION__, ib_mr);
	/* There can be no memory windows */
	if (atomic_load_acq_int(&ib_mr->usecnt.counter))
		return (-EINVAL);

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		cxfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGB, "%s mmid 0x%x ptr %p", __FUNCTION__, mmid, mhp);
	cxfree(mhp);
	return 0;
}

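/*
 * Register a list of physically contiguous buffers.  The buffers are
 * flattened into a page list (build_phys_page_list), a PBL is allocated
 * and written, and the region is then registered with the adapter through
 * iwch_register_mem().
 */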
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					int acc,
					u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		cxfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	cxfree(page_list);
	if (ret)
		goto err;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	cxfree(mhp);
	return ERR_PTR(ret);

}

static int iwch_reregister_phys_mem(struct ib_mr *mr,
				     int mr_rereg_mask,
				     struct ib_pd *pd,
	                             struct ib_phys_buf *buffer_list,
	                             int num_phys_buf,
	                             int acc, u64 * iova_start)
{

	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages = 0;
	int ret;

	CTR3(KTR_IW_CXGB, "%s ib_mr %p ib_pd %p", __FUNCTION__, mr, pd);

	/* There can be no memory windows */
	if (atomic_load_acq_int(&mr->usecnt.counter))
		return (-EINVAL);

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return (-EINVAL);

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
	cxfree(page_list);
	if (ret) {
		return ret;
	}
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}


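/*
 * Register a user memory region.  The user pages are pinned via
 * ib_umem_get(), their DMA addresses are written into the PBL one page's
 * worth of PBL entries at a time, and the region is then registered with
 * the adapter.  For non-T3A devices the PBL offset is returned to the user
 * library through udata.
 */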
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata,
				      int mr_id)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;
	struct scatterlist *sg;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		cxfree(mhp);
		return ERR_PTR(-err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) kmalloc(n * sizeof(u64), M_NOWAIT);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
					mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = iwch_write_pbl(mhp, pages, i, n);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}
#if 0
	TAILQ_FOREACH(chunk, &mhp->umem->chunk_list, entry)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = htobe64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = iwch_write_pbl(mhp, pages, i, n);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}
#endif

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);
pbl_done:
	cxfree(pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
	                         rhp->rdev.rnic_info.pbl_base) >> 3;
		CTR2(KTR_IW_CXGB, "%s user resp pbl_addr 0x%x", __FUNCTION__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			err = EFAULT;
			goto err;
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	cxfree(mhp);
	return ERR_PTR(-err);
}

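/*
 * Get a DMA MR for kernel use.  Implemented as a physical registration of a
 * single buffer starting at address 0 with a 32-bit size, since T3 only
 * supports 32 bits of region size.
 */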
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}

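/*
 * Allocate a memory window: a window STag is obtained from the adapter
 * (cxio_allocate_window) and the MW is published in the mmid table under
 * stag >> 8.  The requested window type is not examined here.
 */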
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		cxfree(mhp);
		return ERR_PTR(-ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		cxfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGB, "%s mmid 0x%x mhp %p stag 0x%x", __FUNCTION__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	cxfree(mhp);
	CTR4(KTR_IW_CXGB, "%s ib_mw %p mmid 0x%x ptr %p", __FUNCTION__, mw, mmid, mhp);
	return 0;
}

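/*
 * Destroy a QP.  Move it to the ERROR state first, wait for the connection
 * endpoint (if any) and all remaining references to go away, then release
 * the hardware WQ and free the software state.
 */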
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	mtx_lock(&qhp->lock);
	if (qhp->ep)
		msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp1", 0);
	mtx_unlock(&qhp->lock);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	mtx_lock(&qhp->lock);
	if (--qhp->refcnt)
		msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp2", 0);
	mtx_unlock(&qhp->lock);

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	CTR4(KTR_IW_CXGB, "%s ib_qp %p qpid 0x%0x qhp %p", __FUNCTION__,
	     ib_qp, qhp->wq.qpid, qhp);
	cxfree(qhp);
	return 0;
}

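/*
 * Create an RC QP.  The RQ depth is rounded up to a power of two strictly
 * greater than the requested max_recv_wr (minimum 16, at most
 * T3_MAX_RQ_SIZE); the SQ and combined WQ sizes are likewise rounded to
 * powers of two because Q_FREECNT() and friends assume it.  For user QPs,
 * two mmap keys are returned: one for the work queue memory and one for
 * the doorbell page.
 */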
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);
	CTR4(KTR_IW_CXGB, "%s wqsize %d sqsize %d rqsize %d", __FUNCTION__,
	     wqsize, sqsize, rqsize);
	qhp = malloc(sizeof(*qhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		cxfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	mtx_init(&qhp->lock, "cxgb qp", NULL, MTX_DEF|MTX_DUPOK);
	qhp->refcnt = 1;

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		cxfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, M_NOWAIT);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, M_NOWAIT);
		if (!mm2) {
			cxfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		mtx_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		mtx_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			cxfree(mm1);
			cxfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = vtophys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	callout_init(&(qhp->timer), 1);
	CTR6(KTR_IW_CXGB, "sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d",
	     qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2);
	return &qhp->ibqp;
}

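/*
 * Translate an ib_modify_qp() call into iwch_modify_qp() attributes.  The
 * RTR state has no iWARP equivalent and is simply dropped from the mask;
 * only state and access-flag changes are honored here.
 */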
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;


	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
	mtx_lock(&to_iwch_qp(qp)->lock);
	to_iwch_qp(qp)->refcnt++;
	mtx_unlock(&to_iwch_qp(qp)->lock);
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
	mtx_lock(&to_iwch_qp(qp)->lock);
	if (--to_iwch_qp(qp)->refcnt == 0)
	        wakeup(to_iwch_qp(qp));
	mtx_unlock(&to_iwch_qp(qp)->lock);
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	CTR3(KTR_IW_CXGB, "%s ib_dev %p qpn 0x%x", __FUNCTION__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}


static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 * pkey)
{
	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;
	struct port_info *pi;
	struct adapter *sc;

	CTR5(KTR_IW_CXGB, "%s ibdev %p, port %d, index %d, gid %p",
	       __FUNCTION__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	sc = dev->rdev.adap;
	PANIC_IF(port == 0 || port > 2);
	pi = &sc->port[port - 1];
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), pi->hw_addr, 6);
	return 0;
}

static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct iwch_dev *dev;
	struct adapter *sc;

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);

	dev = to_iwch_dev(ibdev);
	sc = dev->rdev.adap;
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, sc->port[0].hw_addr, 6);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = pci_get_vendor(sc->dev);
	props->vendor_part_id = pci_get_device(sc->dev);
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = IB_MTU_2048;
	props->state = IB_PORT_ACTIVE;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

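/*
 * Register the device with the RDMA core: fill in the ib_device method
 * table, advertised capabilities and uverbs command mask, and hook up the
 * iWARP CM (iw_cm_verbs) callbacks used by the connection manager.
 */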
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	struct adapter *sc = dev->rdev.adap;

	CTR2(KTR_IW_CXGB, "%s iwch_dev %p", __FUNCTION__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, sc->port[0].hw_addr, 6);
	dev->device_cap_flags =
		(IB_DEVICE_LOCAL_DMA_LKEY |
		 IB_DEVICE_MEM_WINDOW);

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = sc->params.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = NULL;
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;

	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;

	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;

	dev->ibdev.iwcm =
	    kmalloc(sizeof(struct iw_cm_verbs), M_NOWAIT);
	if (!dev->ibdev.iwcm)
		return (ENOMEM);

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen_ep = iwch_create_listen_ep;
	dev->ibdev.iwcm->destroy_listen_ep = iwch_destroy_listen_ep;
	dev->ibdev.iwcm->newconn = process_newconn;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	return (0);

bail1:
	cxfree(dev->ibdev.iwcm);
	return (ret);
}

void iwch_unregister_device(struct iwch_dev *dev)
{

	ib_unregister_device(&dev->ibdev);
	cxfree(dev->ibdev.iwcm);
	return;
}
#endif