/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.c 237263 2012-06-19 07:34:13Z np $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pciio.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/proc.h>

#include <netinet/in.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <linux/idr.h>
#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>

#include <cxgb_include.h>
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
#include <ulp/iw_cxgb/iw_cxgb.h>
#include <ulp/iw_cxgb/iw_cxgb_resource.h>
#include <ulp/iw_cxgb/iw_cxgb_user.h>

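/*
 * Verbs that the T3 iWARP provider does not implement.  They are still
 * wired into the ops table in iwch_register_device() so the core sees a
 * complete device, but each one simply fails with ENOSYS.
 */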
static int
iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return (-ENOSYS);
}

static struct ib_ah *
iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int
iwch_ah_destroy(struct ib_ah *ah)
{
	return (-ENOSYS);
}

static int
iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return (-ENOSYS);
}

static int
iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return (-ENOSYS);
}

static int
iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return (-ENOSYS);
}

static int
iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	CTR2(KTR_IW_CXGB, "%s context %p", __FUNCTION__, context);
	TAILQ_FOREACH_SAFE(mm, &ucontext->mmaps, entry, tmp) {
		TAILQ_REMOVE(&ucontext->mmaps, mm, entry);
		cxfree(mm);
	}
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	cxfree(ucontext);
	return 0;
}

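/*
 * Each user context carries a list of iwch_mm_entry records.  A record
 * pairs an mmap key (returned to userspace in a command response) with
 * the physical address and length the process is allowed to map; the
 * list is torn down in iwch_dealloc_ucontext() above.
 */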
static struct ib_ucontext *
iwch_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	context = malloc(sizeof(*context), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	TAILQ_INIT(&context->mmaps);
	mtx_init(&context->mmap_lock, "ucontext mmap", NULL, MTX_DEF);
	return &context->ibucontext;
}

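/*
 * CQ teardown protocol: the handle is removed from the cqidr table so no
 * new lookups can find the CQ, then the destroying thread drops its own
 * reference and, if others remain, sleeps on the chp pointer until the
 * last holder (the event dispatch path, outside this file) wakes it.
 */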
static int
iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	CTR2(KTR_IW_CXGB, "%s ib_cq %p", __FUNCTION__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	mtx_lock(&chp->lock);
	if (--chp->refcnt)
		msleep(chp, &chp->lock, 0, "iwch_destroy_cq", 0);
	mtx_unlock(&chp->lock);

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	cxfree(chp);
	return 0;
}

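/*
 * Create a CQ.  For a user CQ the response carries an mmap key plus the
 * ring geometry so the process can map the CQE ring.  A minimal sketch
 * of the consuming side (assumption: this only approximates what
 * libcxgb3 does; the real library code is not part of this file):
 *
 *	struct iwch_create_cq_resp resp;
 *	// ...resp filled in by the CREATE_CQ uverbs command...
 *	void *cqes = mmap(NULL, resp.memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, cmd_fd, resp.key);
 */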
static struct ib_cq *
iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	CTR3(KTR_IW_CXGB, "%s ib_dev %p entries %d", __FUNCTION__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = malloc(sizeof(*chp), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!chp) {
		return ERR_PTR(-ENOMEM);
	}
	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				cxfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 /*__user */*)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE failures,
		 *      incoming RDMA READ REQUEST failures.
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		cxfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	mtx_init(&chp->lock, "cxgb cq", NULL, MTX_DEF|MTX_DUPOK);
	chp->refcnt = 1;
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		cxfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, M_NOWAIT);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		mtx_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		mtx_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = vtophys(chp->cq.queue);
		if (udata->outlen < sizeof uresp) {
			if (!warned++)
				CTR1(KTR_IW_CXGB, "%s Warning - "
					"downlevel libcxgb3 (non-fatal).\n",
					__func__);
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
						sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
						sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			resplen = sizeof uresp;
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			cxfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	CTR4(KTR_IW_CXGB, "created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}

static int
iwch_resize_cq(struct ib_cq *cq __unused, int cqe __unused,
    struct ib_udata *udata __unused)
{

	return (-ENOSYS);
}

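/*
 * Re-arm a CQ for notification.  IB_CQ_SOLICITED arms the SGE for
 * solicited events only (CQ_ARM_SE); anything else arms for any event
 * (CQ_ARM_AN).  For a user CQ the current read pointer is first copied
 * in from the address userspace registered at CQ create time, so the
 * hardware is armed relative to what the process has actually consumed.
 */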
static int
iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (copyin(chp->user_rptr_addr, &rptr, 4))
			return (-EFAULT);
		mtx_lock(&chp->lock);
		chp->cq.rptr = rptr;
	} else
		mtx_lock(&chp->lock);
	CTR2(KTR_IW_CXGB, "%s rptr 0x%x", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	mtx_unlock(&chp->lock);
	if (err < 0)
		log(LOG_ERR, "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

static int
iwch_mmap(struct ib_ucontext *context __unused, struct vm_area_struct *vma __unused)
{

	return (-ENOSYS);
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	CTR3(KTR_IW_CXGB, "%s ibpd %p pdid 0x%x", __FUNCTION__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	cxfree(php);
	return 0;
}

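/*
 * Allocate a protection domain.  PD ids come from the adapter's resource
 * pool (rhp->rdev.rscp); for a userspace PD the raw pdid is copied back
 * through udata for the library's use.
 */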
static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = malloc(sizeof(*php), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	CTR3(KTR_IW_CXGB, "%s pdid 0x%0x ptr 0x%p", __FUNCTION__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGB, "%s ib_mr %p", __FUNCTION__, ib_mr);
	/* There can be no memory windows */
	if (atomic_load_acq_int(&ib_mr->usecnt.counter))
		return (-EINVAL);

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		cxfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGB, "%s mmid 0x%x ptr %p", __FUNCTION__, mmid, mhp);
	cxfree(mhp);
	return 0;
}

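/*
 * Register a physical memory region.  The flow is: check that the iova
 * and the first buffer share the same in-page offset, flatten the buffer
 * list into a page list with build_phys_page_list(), allocate and write
 * the PBL in adapter memory, then create the TPT entry through
 * iwch_register_mem().
 */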
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					int acc,
					u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		cxfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	cxfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	cxfree(mhp);
	return ERR_PTR(ret);
}

static int iwch_reregister_phys_mem(struct ib_mr *mr,
				     int mr_rereg_mask,
				     struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf,
				     int acc, u64 *iova_start)
{
	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	CTR3(KTR_IW_CXGB, "%s ib_mr %p ib_pd %p", __FUNCTION__, mr, pd);

	/* There can be no memory windows */
	if (atomic_load_acq_int(&mr->usecnt.counter))
		return (-EINVAL);

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return (-EINVAL);

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
	cxfree(page_list);
	if (ret) {
		return ret;
	}
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

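/*
 * Register a userspace memory region.  Note that the chunk walk that
 * would populate the page list is compiled out below (#ifdef notyet), so
 * in this revision the PBL is allocated but never written from here.
 */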
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, i, n;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;
#ifdef notyet
	int j, k, len;
#endif

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		cxfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) kmalloc(n * sizeof(u64), M_NOWAIT);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

#ifdef notyet
	TAILQ_FOREACH(chunk, &mhp->umem->chunk_list, entry)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = htobe64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = iwch_write_pbl(mhp, pages, i, n);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}
#endif

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);
#ifdef notyet
pbl_done:
#endif
	cxfree(pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		CTR2(KTR_IW_CXGB, "%s user resp pbl_addr 0x%x", __FUNCTION__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			/* iwch_dereg_mr() already released the umem and mhp. */
			iwch_dereg_mr(&mhp->ibmr);
			return ERR_PTR(-EFAULT);
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	cxfree(mhp);
	return ERR_PTR(err);
}

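/*
 * A "DMA MR" on T3 is just a physical MR over the largest region the
 * hardware can describe: TPT lengths are 32 bits, so the region spans
 * [0, 0xffffffff] rather than all of memory.
 */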
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}

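/*
 * Memory windows: cxio_allocate_window() allocates a TPT entry and hands
 * back its stag; the upper 24 bits (stag >> 8) form the memory id used
 * as the key into the mmidr handle table, and the stag itself becomes
 * the window's rkey.
 */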
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		cxfree(mhp);
		return ERR_PTR(-ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		cxfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGB, "%s mmid 0x%x mhp %p stag 0x%x", __FUNCTION__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	cxfree(mhp);
	CTR4(KTR_IW_CXGB, "%s ib_mw %p mmid 0x%x ptr %p", __FUNCTION__, mw, mmid, mhp);
	return 0;
}

static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	mtx_lock(&qhp->lock);
	if (qhp->ep)
		msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp1", 0);
	mtx_unlock(&qhp->lock);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	mtx_lock(&qhp->lock);
	if (--qhp->refcnt)
		msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp2", 0);
	mtx_unlock(&qhp->lock);

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	CTR4(KTR_IW_CXGB, "%s ib_qp %p qpid 0x%0x qhp %p", __FUNCTION__,
	     ib_qp, qhp->wq.qpid, qhp);
	cxfree(qhp);
	return 0;
}

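/*
 * QP sizing, worked through for example caps (illustrative numbers, not
 * from the source): max_recv_wr = 16, max_send_wr = 24.
 *
 *	rqsize = roundup_pow_of_two(16) = 16, which equals max_recv_wr,
 *	    so it is recomputed as roundup_pow_of_two(17) = 32 to keep
 *	    the required "entries + 1" RQT slack;
 *	sqsize = roundup_pow_of_two(24) = 32;
 *	wqsize = roundup_pow_of_two(32 + 32) = 64 entries in the
 *	    combined work queue.
 */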
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);
	CTR4(KTR_IW_CXGB, "%s wqsize %d sqsize %d rqsize %d", __FUNCTION__,
	     wqsize, sqsize, rqsize);
	qhp = malloc(sizeof(*qhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		cxfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix: the CM should probably set them explicitly.
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	mtx_init(&qhp->lock, "cxgb qp", NULL, MTX_DEF|MTX_DUPOK);
	qhp->refcnt = 1;

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		cxfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {
		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, M_NOWAIT);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, M_NOWAIT);
		if (!mm2) {
			cxfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		mtx_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		mtx_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			cxfree(mm1);
			cxfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = vtophys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	callout_init(&(qhp->timer), TRUE);
	CTR6(KTR_IW_CXGB, "sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d",
	     qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2);
	return &qhp->ibqp;
}

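/*
 * Translate the generic ib_qp_attr/attr_mask pair into the iwch
 * attribute mask and values.  iWARP has no RTR state, so a requested
 * transition to RTR is dropped from the mask before anything is applied.
 */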
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
	mtx_lock(&to_iwch_qp(qp)->lock);
	to_iwch_qp(qp)->refcnt++;
	mtx_unlock(&to_iwch_qp(qp)->lock);
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
	mtx_lock(&to_iwch_qp(qp)->lock);
	if (--to_iwch_qp(qp)->refcnt == 0)
		wakeup(to_iwch_qp(qp));
	mtx_unlock(&to_iwch_qp(qp)->lock);
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	CTR3(KTR_IW_CXGB, "%s ib_dev %p qpn 0x%x", __FUNCTION__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}

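/*
 * T3 is an Ethernet RNIC, so there is no real pkey or GID table: the
 * single pkey reads as 0 and each port's GID is synthesized from the
 * port's MAC address (6 bytes of hw_addr, zero padded).
 */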
static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 *pkey)
{
	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;
	struct port_info *pi;
	struct adapter *sc;

	CTR5(KTR_IW_CXGB, "%s ibdev %p, port %d, index %d, gid %p",
	       __FUNCTION__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	sc = dev->rdev.adap;
	PANIC_IF(port == 0 || port > 2);
	pi = &sc->port[port - 1];
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), pi->hw_addr, 6);
	return 0;
}

static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct iwch_dev *dev;
	struct adapter *sc;

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);

	dev = to_iwch_dev(ibdev);
	sc = dev->rdev.adap;
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, sc->port[0].hw_addr, 6);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = pci_get_vendor(sc->dev);
	props->vendor_part_id = pci_get_device(sc->dev);
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = IB_MTU_2048;
	props->state = IB_PORT_ACTIVE;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

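/*
 * Hook the device into the RDMA core: fill in the ib_device ops table,
 * allocate and populate the iWARP CM ops (iwcm), then call
 * ib_register_device().  On failure the iwcm table is freed again.
 */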
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	struct adapter *sc = dev->rdev.adap;

	CTR2(KTR_IW_CXGB, "%s iwch_dev %p", __FUNCTION__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, sc->port[0].hw_addr, 6);
	dev->device_cap_flags =
		(IB_DEVICE_LOCAL_DMA_LKEY |
		 IB_DEVICE_MEM_WINDOW);

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = sc->params.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = dev->rdev.adap->dev;
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;

	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;

	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;

	dev->ibdev.iwcm =
	    kmalloc(sizeof(struct iw_cm_verbs), M_NOWAIT);
	if (!dev->ibdev.iwcm)
		return (ENOMEM);

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev);
	if (ret)
		goto bail1;

	return (0);

bail1:
	cxfree(dev->ibdev.iwcm);
	return (ret);
}

void iwch_unregister_device(struct iwch_dev *dev)
{

	ib_unregister_device(&dev->ibdev);
	cxfree(dev->ibdev.iwcm);
	return;
}
#endif