/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.c 278886 2015-02-17 08:40:27Z hselasky $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pciio.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/proc.h>

#include <netinet/in.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <linux/idr.h>
#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>

#include <cxgb_include.h>
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
#include <ulp/iw_cxgb/iw_cxgb.h>
#include <ulp/iw_cxgb/iw_cxgb_resource.h>
#include <ulp/iw_cxgb/iw_cxgb_user.h>

static int
iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return (-ENOSYS);
}

static struct ib_ah *
iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int
iwch_ah_destroy(struct ib_ah *ah)
{
	return (-ENOSYS);
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return (-ENOSYS);
}

static int
iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return (-ENOSYS);
}

static int
iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return (-ENOSYS);
}

static int
iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	CTR2(KTR_IW_CXGB, "%s context %p", __FUNCTION__, context);
	TAILQ_FOREACH_SAFE(mm, &ucontext->mmaps, entry, tmp) {
		TAILQ_REMOVE(&ucontext->mmaps, mm, entry);
		cxfree(mm);
	}
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	cxfree(ucontext);
	return 0;
}

static struct ib_ucontext *
iwch_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	context = malloc(sizeof(*context), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	TAILQ_INIT(&context->mmaps);
	mtx_init(&context->mmap_lock, "ucontext mmap", NULL, MTX_DEF);
	return &context->ibucontext;
}

static int
iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	CTR2(KTR_IW_CXGB, "%s ib_cq %p", __FUNCTION__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	mtx_lock(&chp->lock);
	if (--chp->refcnt)
		msleep(chp, &chp->lock, 0, "iwch_destroy_cq", 0);
	mtx_unlock(&chp->lock);

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	cxfree(chp);
	return 0;
}

static struct ib_cq *
iwch_create_cq(struct ib_device *ibdev, struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;
	int entries = attr->cqe;

	CTR3(KTR_IW_CXGB, "%s ib_dev %p entries %d", __FUNCTION__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = malloc(sizeof(*chp), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!chp) {
		return ERR_PTR(-ENOMEM);
	}
	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				cxfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 /*__user */*)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);
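
	/*
	 * Worked example of the sizing above (illustrative numbers): a user
	 * asking for 60 CQEs on a T3A gets 60 + 16 = 76 after the fluff,
	 * roundup_pow_of_two(76) = 128, so size_log2 = 7 and ibcq.cqe is
	 * reported as 128 below.
	 */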

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		cxfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	mtx_init(&chp->lock, "cxgb cq", NULL, MTX_DEF|MTX_DUPOK);
	chp->refcnt = 1;
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		cxfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, M_NOWAIT);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		mtx_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		mtx_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = vtophys(chp->cq.queue);
		if (udata->outlen < sizeof uresp) {
			if (!warned++)
				CTR1(KTR_IW_CXGB, "%s Warning - "
				    "downlevel libcxgb3 (non-fatal).\n",
				    __func__);
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
			    sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
			    sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			resplen = sizeof uresp;
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			cxfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	CTR4(KTR_IW_CXGB, "created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
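
/*
 * A hedged sketch of the matching userspace flow (libcxgb3-style; the
 * helper names below are illustrative, not taken from this file): the
 * library issues the create-CQ command and then mmaps the queue through
 * the opaque key returned in uresp.  insert_mmap() above is what later
 * lets the driver translate that key/offset back into
 * vtophys(chp->cq.queue).
 *
 *	struct iwch_create_cq_resp resp;
 *	// ibv_cmd_create_cq(...) fills resp from the kernel
 *	cq->queue = mmap(NULL, resp.memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, ctx->cmd_fd, resp.key);
 */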

static int
iwch_resize_cq(struct ib_cq *cq __unused, int cqe __unused,
    struct ib_udata *udata __unused)
{

	return (-ENOSYS);
}

static int
iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		/* copyin(9) takes the user address first, kernel buffer second. */
		if (copyin(chp->user_rptr_addr, &rptr, 4))
			return (-EFAULT);
		mtx_lock(&chp->lock);
		chp->cq.rptr = rptr;
	} else
		mtx_lock(&chp->lock);
	CTR2(KTR_IW_CXGB, "%s rptr 0x%x", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	mtx_unlock(&chp->lock);
	if (err < 0)
		log(LOG_ERR, "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}
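
/*
 * iwch_arm_cq backs the standard ib_req_notify_cq() verb.  A minimal
 * consumer sketch (generic Verbs usage, not specific to this driver):
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);		// -> CQ_ARM_AN
 *	ib_req_notify_cq(cq, IB_CQ_SOLICITED);		// -> CQ_ARM_SE
 *
 * i.e. "solicited only" arms the CQ for solicited events; anything else
 * arms it for the next completion of any kind.
 */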

static int
iwch_mmap(struct ib_ucontext *context __unused, struct vm_area_struct *vma __unused)
{

	return (-ENOSYS);
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	CTR3(KTR_IW_CXGB, "%s ibpd %p pdid 0x%x", __FUNCTION__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	cxfree(php);
	return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = malloc(sizeof(*php), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	CTR3(KTR_IW_CXGB, "%s pdid 0x%0x ptr 0x%p", __FUNCTION__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGB, "%s ib_mr %p", __FUNCTION__, ib_mr);
	/* There can be no memory windows */
	if (atomic_load_acq_int(&ib_mr->usecnt.counter))
		return (-EINVAL);

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		cxfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGB, "%s mmid 0x%x ptr %p", __FUNCTION__, mmid, mhp);
	cxfree(mhp);
	return 0;
}

static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					int acc,
					u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}
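
	/*
	 * Example of the alignment rules above (illustrative, 4K pages):
	 * iova_start 0x10200 with buffer_list[0].addr 0x84200 passes the
	 * first check because both have page offset 0x200; with
	 * num_phys_buf > 1 the first buffer must additionally end on a
	 * page boundary, or the buffers could not be described by one
	 * contiguous page list.
	 */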

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		cxfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	cxfree(page_list);
	if (ret)
		goto err;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	cxfree(mhp);
	return ERR_PTR(ret);

}

static int iwch_reregister_phys_mem(struct ib_mr *mr,
				     int mr_rereg_mask,
				     struct ib_pd *pd,
	                             struct ib_phys_buf *buffer_list,
	                             int num_phys_buf,
	                             int acc, u64 * iova_start)
{

	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages = 0;
	int ret;

	CTR3(KTR_IW_CXGB, "%s ib_mr %p ib_pd %p", __FUNCTION__, mr, pd);

	/* There can be no memory windows */
	if (atomic_load_acq_int(&mr->usecnt.counter))
		return (-EINVAL);

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return (-EINVAL);

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
	cxfree(page_list);
	if (ret) {
		return ret;
	}
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}
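
/*
 * Usage sketch for the rereg mask handling above (the flags are the
 * standard Verbs ones; the call itself is illustrative): a caller that
 * only wants to change protection bits would issue
 *
 *	ib_rereg_phys_mr(mr, IB_MR_REREG_ACCESS, NULL, NULL, 0,
 *	    IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE, NULL);
 *
 * leaving the PD and translation untouched.  Note the design choice:
 * the function mutates a stack copy (mh) and only commits the new
 * attributes to *mhp once iwch_reregister_mem() has succeeded.
 */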

static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata,
				      int mr_id)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;
	struct scatterlist *sg;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		/* PTR_ERR() already yields a negative errno. */
		err = PTR_ERR(mhp->umem);
		cxfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) kmalloc(n * sizeof(u64), M_NOWAIT);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
					mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = iwch_write_pbl(mhp, pages, i, n);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}
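
	/*
	 * The loop above batches the PBL writes: pages[] holds one kernel
	 * page worth of big-endian DMA addresses (PAGE_SIZE / 8 = 512
	 * entries with 4K pages), and each time it fills it is flushed to
	 * adapter PBL memory at entry offset n before being reused.
	 */
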
#if 0
	TAILQ_FOREACH(chunk, &mhp->umem->chunk_list, entry)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = htobe64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = iwch_write_pbl(mhp, pages, i, n);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}
#endif

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);
pbl_done:
	cxfree(pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
	                         rhp->rdev.rnic_info.pbl_base) >> 3;
		CTR2(KTR_IW_CXGB, "%s user resp pbl_addr 0x%x", __FUNCTION__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			/*
			 * iwch_dereg_mr() already releases the umem and
			 * frees mhp, so don't fall through to the "err"
			 * label and tear it all down a second time.
			 */
			iwch_dereg_mr(&mhp->ibmr);
			return ERR_PTR(-EFAULT);
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	cxfree(mhp);
	/* "err" holds a negative errno on every path that lands here. */
	return ERR_PTR(err);
}

static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}

static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		cxfree(mhp);
		/* cxio errors are negative errnos; don't negate them again. */
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		cxfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGB, "%s mmid 0x%x mhp %p stag 0x%x", __FUNCTION__, mmid, mhp, stag);
	return &(mhp->ibmw);
}
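
/*
 * Example of the stag/mmid split used above and in iwch_dealloc_mw()
 * (illustrative value): stag 0x00012300 maps to mmid 0x123 -- the upper
 * 24 bits (the stag index) are what the mmidr table is keyed on, while
 * the full 32-bit stag is what gets exposed as the rkey.
 */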

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	cxfree(mhp);
	CTR4(KTR_IW_CXGB, "%s ib_mw %p mmid 0x%x ptr %p", __FUNCTION__, mw, mmid, mhp);
	return 0;
}

static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	mtx_lock(&qhp->lock);
	if (qhp->ep)
		msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp1", 0);
	mtx_unlock(&qhp->lock);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	mtx_lock(&qhp->lock);
	if (--qhp->refcnt)
		msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp2", 0);
	mtx_unlock(&qhp->lock);

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	CTR4(KTR_IW_CXGB, "%s ib_qp %p qpid 0x%0x qhp %p", __FUNCTION__,
	     ib_qp, qhp->wq.qpid, qhp);
	cxfree(qhp);
	return 0;
}

static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;
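
	/*
	 * Worked example of the RQT rule above: max_recv_wr = 32 rounds to
	 * 32, which leaves no room for the extra entry, so it is recomputed
	 * as roundup_pow_of_two(33) = 64; max_recv_wr = 10 rounds straight
	 * to 16, which already holds 10 + 1 entries.
	 */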

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are, e.g. Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);
	CTR4(KTR_IW_CXGB, "%s wqsize %d sqsize %d rqsize %d", __FUNCTION__,
	     wqsize, sqsize, rqsize);
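
	/*
	 * Example (illustrative numbers): max_send_wr = 100 gives
	 * sqsize = 128; with rqsize = 64 that makes
	 * wqsize = roundup_pow_of_two(192) = 256, so size_log2 = 8,
	 * sq_size_log2 = 7 and rq_size_log2 = 6 below.
	 */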
	qhp = malloc(sizeof(*qhp), M_DEVBUF, M_ZERO|M_NOWAIT);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		cxfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	mtx_init(&qhp->lock, "cxgb qp", NULL, MTX_DEF|MTX_DUPOK);
	qhp->refcnt = 1;

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		cxfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, M_NOWAIT);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, M_NOWAIT);
		if (!mm2) {
			cxfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		mtx_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		mtx_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			cxfree(mm1);
			cxfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = vtophys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	callout_init(&(qhp->timer), TRUE);
	CTR6(KTR_IW_CXGB, "sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d",
	     qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2);
	return &qhp->ibqp;
}

static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, ibqp);

	/* iWARP does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
	mtx_lock(&to_iwch_qp(qp)->lock);
	to_iwch_qp(qp)->refcnt++;
	mtx_unlock(&to_iwch_qp(qp)->lock);
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
	mtx_lock(&to_iwch_qp(qp)->lock);
	if (--to_iwch_qp(qp)->refcnt == 0)
		wakeup(to_iwch_qp(qp));
	mtx_unlock(&to_iwch_qp(qp)->lock);
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	CTR3(KTR_IW_CXGB, "%s ib_dev %p qpn 0x%x", __FUNCTION__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}

static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 * pkey)
{
	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;
	struct port_info *pi;
	struct adapter *sc;

	CTR5(KTR_IW_CXGB, "%s ibdev %p, port %d, index %d, gid %p",
	       __FUNCTION__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	sc = dev->rdev.adap;
	PANIC_IF(port == 0 || port > 2);
	pi = &sc->port[port - 1];
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), pi->hw_addr, 6);
	return 0;
}
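
/*
 * Example of the GID layout produced above (MAC value illustrative): a
 * port hardware address of 00:07:43:aa:bb:cc yields
 * gid->raw = { 00 07 43 aa bb cc 00 00 00 00 00 00 00 00 00 00 },
 * i.e. the 6-byte Ethernet address left-justified in the zeroed 16-byte
 * GID, as is common for MAC-derived iWARP GIDs.
 */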

static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct iwch_dev *dev;
	struct adapter *sc;

	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);

	dev = to_iwch_dev(ibdev);
	sc = dev->rdev.adap;
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, sc->port[0].hw_addr, 6);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = pci_get_vendor(sc->dev);
	props->vendor_part_id = pci_get_device(sc->dev);
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = IB_MTU_2048;
	props->state = IB_PORT_ACTIVE;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	struct adapter *sc = dev->rdev.adap;

	CTR2(KTR_IW_CXGB, "%s iwch_dev %p", __FUNCTION__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, sc->port[0].hw_addr, 6);
	dev->device_cap_flags =
		(IB_DEVICE_LOCAL_DMA_LKEY |
		 IB_DEVICE_MEM_WINDOW);

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = sc->params.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = dev->rdev.adap->dev;
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;

	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;

	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;

	dev->ibdev.iwcm =
	    kmalloc(sizeof(struct iw_cm_verbs), M_NOWAIT);
	if (!dev->ibdev.iwcm)
		return (ENOMEM);

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	return (0);

bail1:
	cxfree(dev->ibdev.iwcm);
	return (ret);
}

void iwch_unregister_device(struct iwch_dev *dev)
{

	ib_unregister_device(&dev->ibdev);
	cxfree(dev->ibdev.iwcm);
	return;
}
#endif