sys/dev/cxgbe/iw_cxgbe/provider.c: head r256694 vs. stable/10 r256819 (the two revisions differ only in the $FreeBSD$ ID line).
/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cxgbe/iw_cxgbe/provider.c 256694 2013-10-17 18:37:25Z np $");
__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/provider.c 256694 2013-10-17 18:37:25Z np $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <asm/pgtable.h>
#include <linux/page.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgbe.h"
#include "user.h"

static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default = 1)");

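/*
 * Address handles, multicast groups, MADs, and port modification do not
 * apply to an iWARP device, so the following verbs are stubs that simply
 * report the operation as unsupported.
 */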
static int c4iw_modify_port(struct ib_device *ibdev,
    u8 port, int port_modify_mask,
    struct ib_port_modify *props)
{
        return -ENOSYS;
}

static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
    struct ib_ah_attr *ah_attr)
{
        return ERR_PTR(-ENOSYS);
}

static int c4iw_ah_destroy(struct ib_ah *ah)
{
        return -ENOSYS;
}

static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return -ENOSYS;
}

static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return -ENOSYS;
}

static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
    u8 port_num, struct ib_wc *in_wc,
    struct ib_grh *in_grh, struct ib_mad *in_mad,
    struct ib_mad *out_mad)
{
        return -ENOSYS;
}

static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
        struct c4iw_dev *rhp = to_c4iw_dev(context->device);
        struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
        struct c4iw_mm_entry *mm, *tmp;

        CTR2(KTR_IW_CXGBE, "%s context %p", __func__, context);
        list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
                kfree(mm);
        c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
        kfree(ucontext);
        return 0;
}

static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
    struct ib_udata *udata)
{
        struct c4iw_ucontext *context;
        struct c4iw_dev *rhp = to_c4iw_dev(ibdev);

        CTR2(KTR_IW_CXGBE, "%s ibdev %p", __func__, ibdev);
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);
        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);
        return &context->ibucontext;
}

static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
{
        return pgprot_writecombine(prot);
}

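/*
 * mmap handler for user contexts.  The page offset of the request is used as
 * a key into the ucontext's mmap list; depending on which adapter resource
 * the matching entry refers to, the MA_SYNC register, the user doorbell/OCQP
 * region, or WQ/CQ memory is mapped into the caller's address space.
 */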
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        int len = vma->vm_end - vma->vm_start;
        u32 key = vma->vm_pgoff << PAGE_SHIFT;
        struct c4iw_rdev *rdev;
        int ret = 0;
        struct c4iw_mm_entry *mm;
        struct c4iw_ucontext *ucontext;
        u64 addr, paddr;

        u64 va_regs_res = 0, va_udbs_res = 0;
        u64 len_regs_res = 0, len_udbs_res = 0;

        CTR3(KTR_IW_CXGBE, "%s:1 ctx %p vma %p", __func__, context, vma);

        CTR4(KTR_IW_CXGBE, "%s:1a pgoff 0x%lx key 0x%x len %d", __func__,
            vma->vm_pgoff, key, len);

        if (vma->vm_start & (PAGE_SIZE-1)) {
                CTR3(KTR_IW_CXGBE, "%s:2 unaligned vm_start %u vma %p",
                    __func__, vma->vm_start, vma);
                return -EINVAL;
        }

        rdev = &(to_c4iw_dev(context->device)->rdev);
        ucontext = to_c4iw_ucontext(context);

        mm = remove_mmap(ucontext, key, len);
        if (!mm) {
                CTR4(KTR_IW_CXGBE, "%s:3 ucontext %p key %u len %u", __func__,
                    ucontext, key, len);
                return -EINVAL;
        }
        addr = mm->addr;
        kfree(mm);

        va_regs_res = (u64)rman_get_virtual(rdev->adap->regs_res);
        len_regs_res = (u64)rman_get_size(rdev->adap->regs_res);
        va_udbs_res = (u64)rman_get_virtual(rdev->adap->udbs_res);
        len_udbs_res = (u64)rman_get_size(rdev->adap->udbs_res);

        CTR6(KTR_IW_CXGBE,
            "%s:4 addr %p, masync region %p:%p, udb region %p:%p", __func__,
            addr, va_regs_res, va_regs_res+len_regs_res, va_udbs_res,
            va_udbs_res+len_udbs_res);

        if (addr >= va_regs_res && addr < va_regs_res + len_regs_res) {
                CTR4(KTR_IW_CXGBE, "%s:5 MA_SYNC addr %p region %p, reglen %u",
                    __func__, addr, va_regs_res, len_regs_res);
                /*
                 * MA_SYNC register...
                 */
                paddr = vtophys(addr);
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                ret = io_remap_pfn_range(vma, vma->vm_start,
                    paddr >> PAGE_SHIFT,
                    len, vma->vm_page_prot);
        } else {

                if (addr >= va_udbs_res && addr < va_udbs_res + len_udbs_res) {
                        /*
                         * Map user DB or OCQP memory...
                         */
                        paddr = vtophys(addr);
                        CTR4(KTR_IW_CXGBE,
                            "%s:6 USER DB-GTS addr %p region %p, reglen %u",
                            __func__, addr, va_udbs_res, len_udbs_res);
#ifdef DOT5
                        if (is_t5(rdev->lldi.adapter_type) && map_udb_as_wc)
                                vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
                        else
#endif
                                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                        ret = io_remap_pfn_range(vma, vma->vm_start,
                            paddr >> PAGE_SHIFT,
                            len, vma->vm_page_prot);
                } else {
                        /*
                         * Map WQ or CQ contig dma memory...
                         */
                        CTR4(KTR_IW_CXGBE,
                            "%s:7 WQ/CQ addr %p vm_start %u vma %p", __func__,
                            addr, vma->vm_start, vma);
                        ret = io_remap_pfn_range(vma, vma->vm_start,
                            addr >> PAGE_SHIFT,
                            len, vma->vm_page_prot);
                }
        }
        CTR4(KTR_IW_CXGBE, "%s:8 ctx %p vma %p ret %u", __func__, context, vma,
            ret);
        return ret;
}

static int
c4iw_deallocate_pd(struct ib_pd *pd)
{
        struct c4iw_pd *php = to_c4iw_pd(pd);
        struct c4iw_dev *rhp = php->rhp;

        CTR3(KTR_IW_CXGBE, "%s: pd %p, pdid 0x%x", __func__, pd, php->pdid);

        c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
        mutex_lock(&rhp->rdev.stats.lock);
        rhp->rdev.stats.pd.cur--;
        mutex_unlock(&rhp->rdev.stats.lock);
        kfree(php);

        return (0);
}

static struct ib_pd *
c4iw_allocate_pd(struct ib_device *ibdev, struct ib_ucontext *context,
    struct ib_udata *udata)
{
        struct c4iw_pd *php;
        u32 pdid;
        struct c4iw_dev *rhp;

        CTR4(KTR_IW_CXGBE, "%s: ibdev %p, context %p, data %p", __func__, ibdev,
            context, udata);
        rhp = (struct c4iw_dev *) ibdev;
        pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
        if (!pdid)
                return ERR_PTR(-EINVAL);
        php = kzalloc(sizeof(*php), GFP_KERNEL);
        if (!php) {
                c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
                return ERR_PTR(-ENOMEM);
        }
        php->pdid = pdid;
        php->rhp = rhp;
        if (context) {
                if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
                        c4iw_deallocate_pd(&php->ibpd);
                        return ERR_PTR(-EFAULT);
                }
        }
        mutex_lock(&rhp->rdev.stats.lock);
        rhp->rdev.stats.pd.cur++;
        if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
                rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
        mutex_unlock(&rhp->rdev.stats.lock);

        CTR6(KTR_IW_CXGBE,
            "%s: ibdev %p, context %p, data %p, pddid 0x%x, pd %p", __func__,
            ibdev, context, udata, pdid, php);
        return (&php->ibpd);
}

static int
c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{

        CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, pkey %p", __func__,
            ibdev, port, index, pkey);

        *pkey = 0;
        return (0);
}

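/*
 * There is no GID table on an iWARP device; the GID reported for a port is
 * simply the port's Ethernet MAC address, zero-padded to GID length.
 */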
static int
c4iw_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid)
{
        struct c4iw_dev *dev;
        struct port_info *pi;
        struct adapter *sc;

        CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, gid %p", __func__,
            ibdev, port, index, gid);

        memset(&gid->raw[0], 0, sizeof(gid->raw));
        dev = to_c4iw_dev(ibdev);
        sc = dev->rdev.adap;
        if (port == 0 || port > sc->params.nports)
                return (-EINVAL);
        pi = sc->port[port - 1];
        memcpy(&gid->raw[0], pi->hw_addr, sizeof(pi->hw_addr));
        return (0);
}

static int
c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
{
        struct c4iw_dev *dev = to_c4iw_dev(ibdev);
        struct adapter *sc = dev->rdev.adap;

        CTR3(KTR_IW_CXGBE, "%s ibdev %p, props %p", __func__, ibdev, props);

        memset(props, 0, sizeof *props);
        memcpy(&props->sys_image_guid, sc->port[0]->hw_addr, 6);
        props->hw_ver = sc->params.chipid;
        props->fw_ver = sc->params.fw_vers;
        props->device_cap_flags = dev->device_cap_flags;
        props->page_size_cap = T4_PAGESIZE_MASK;
        props->vendor_id = pci_get_vendor(sc->dev);
        props->vendor_part_id = pci_get_device(sc->dev);
        props->max_mr_size = T4_MAX_MR_SIZE;
        props->max_qp = T4_MAX_NUM_QP;
        props->max_qp_wr = T4_MAX_QP_DEPTH;
        props->max_sge = T4_MAX_RECV_SGE;
        props->max_sge_rd = 1;
        props->max_qp_rd_atom = c4iw_max_read_depth;
        props->max_qp_init_rd_atom = c4iw_max_read_depth;
        props->max_cq = T4_MAX_NUM_CQ;
        props->max_cqe = T4_MAX_CQ_DEPTH;
        props->max_mr = c4iw_num_stags(&dev->rdev);
        props->max_pd = T4_MAX_NUM_PD;
        props->local_ca_ack_delay = 0;
        props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH;

        return (0);
}

/*
 * Returns -errno on failure.
 */
static int
c4iw_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
{
        struct c4iw_dev *dev;
        struct adapter *sc;
        struct port_info *pi;
        struct ifnet *ifp;

        CTR4(KTR_IW_CXGBE, "%s ibdev %p, port %d, props %p", __func__, ibdev,
            port, props);

        dev = to_c4iw_dev(ibdev);
        sc = dev->rdev.adap;
        if (port > sc->params.nports)
                return (-EINVAL);
        pi = sc->port[port - 1];
        ifp = pi->ifp;

        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
        if (ifp->if_mtu >= 4096)
                props->active_mtu = IB_MTU_4096;
        else if (ifp->if_mtu >= 2048)
                props->active_mtu = IB_MTU_2048;
        else if (ifp->if_mtu >= 1024)
                props->active_mtu = IB_MTU_1024;
        else if (ifp->if_mtu >= 512)
                props->active_mtu = IB_MTU_512;
        else
                props->active_mtu = IB_MTU_256;
        props->state = pi->link_cfg.link_ok ? IB_PORT_ACTIVE : IB_PORT_DOWN;
        props->port_cap_flags =
            IB_PORT_CM_SUP |
            IB_PORT_SNMP_TUNNEL_SUP |
            IB_PORT_REINIT_SUP |
            IB_PORT_DEVICE_MGMT_SUP |
            IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        props->active_width = 2;
        props->active_speed = 2;
        props->max_msg_sz = -1;

        return 0;
}

/*
 * Returns -errno on error.
 */
int
c4iw_register_device(struct c4iw_dev *dev)
{
        struct adapter *sc = dev->rdev.adap;
        struct ib_device *ibdev = &dev->ibdev;
        struct iw_cm_verbs *iwcm;
        int ret;

        CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev, sc);
        BUG_ON(!sc->port[0]);
        strlcpy(ibdev->name, device_get_nameunit(sc->dev), sizeof(ibdev->name));
        memset(&ibdev->node_guid, 0, sizeof(ibdev->node_guid));
        memcpy(&ibdev->node_guid, sc->port[0]->hw_addr, 6);
        ibdev->owner = THIS_MODULE;
        dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
        if (fastreg_support)
                dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        ibdev->local_dma_lkey = 0;
        ibdev->uverbs_cmd_mask =
            (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
            (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
            (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
            (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_REG_MR) |
            (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
            (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
            (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
            (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
            (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
            (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
            (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
            (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
            (1ull << IB_USER_VERBS_CMD_POST_SEND) |
            (1ull << IB_USER_VERBS_CMD_POST_RECV);
        ibdev->node_type = RDMA_NODE_RNIC;
        strlcpy(ibdev->node_desc, C4IW_NODE_DESC, sizeof(ibdev->node_desc));
        ibdev->phys_port_cnt = sc->params.nports;
        ibdev->num_comp_vectors = 1;
        ibdev->dma_device = sc->dev;
        ibdev->query_device = c4iw_query_device;
        ibdev->query_port = c4iw_query_port;
        ibdev->modify_port = c4iw_modify_port;
        ibdev->query_pkey = c4iw_query_pkey;
        ibdev->query_gid = c4iw_query_gid;
        ibdev->alloc_ucontext = c4iw_alloc_ucontext;
        ibdev->dealloc_ucontext = c4iw_dealloc_ucontext;
        ibdev->mmap = c4iw_mmap;
        ibdev->alloc_pd = c4iw_allocate_pd;
        ibdev->dealloc_pd = c4iw_deallocate_pd;
        ibdev->create_ah = c4iw_ah_create;
        ibdev->destroy_ah = c4iw_ah_destroy;
        ibdev->create_qp = c4iw_create_qp;
        ibdev->modify_qp = c4iw_ib_modify_qp;
        ibdev->query_qp = c4iw_ib_query_qp;
        ibdev->destroy_qp = c4iw_destroy_qp;
        ibdev->create_cq = c4iw_create_cq;
        ibdev->destroy_cq = c4iw_destroy_cq;
        ibdev->resize_cq = c4iw_resize_cq;
        ibdev->poll_cq = c4iw_poll_cq;
        ibdev->get_dma_mr = c4iw_get_dma_mr;
        ibdev->reg_phys_mr = c4iw_register_phys_mem;
        ibdev->rereg_phys_mr = c4iw_reregister_phys_mem;
        ibdev->reg_user_mr = c4iw_reg_user_mr;
        ibdev->dereg_mr = c4iw_dereg_mr;
        ibdev->alloc_mw = c4iw_alloc_mw;
        ibdev->bind_mw = c4iw_bind_mw;
        ibdev->dealloc_mw = c4iw_dealloc_mw;
        ibdev->alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
        ibdev->alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
        ibdev->free_fast_reg_page_list = c4iw_free_fastreg_pbl;
        ibdev->attach_mcast = c4iw_multicast_attach;
        ibdev->detach_mcast = c4iw_multicast_detach;
        ibdev->process_mad = c4iw_process_mad;
        ibdev->req_notify_cq = c4iw_arm_cq;
        ibdev->post_send = c4iw_post_send;
        ibdev->post_recv = c4iw_post_receive;
        ibdev->uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;

        iwcm = kmalloc(sizeof(*iwcm), GFP_KERNEL);
        if (iwcm == NULL)
                return (-ENOMEM);

        iwcm->connect = c4iw_connect;
        iwcm->accept = c4iw_accept_cr;
        iwcm->reject = c4iw_reject_cr;
        iwcm->create_listen = c4iw_create_listen;
        iwcm->destroy_listen = c4iw_destroy_listen;
        iwcm->add_ref = c4iw_qp_add_ref;
        iwcm->rem_ref = c4iw_qp_rem_ref;
        iwcm->get_qp = c4iw_get_qp;
        ibdev->iwcm = iwcm;

        ret = ib_register_device(&dev->ibdev, NULL);
        if (ret)
                kfree(iwcm);

        return (ret);
}

void
c4iw_unregister_device(struct c4iw_dev *dev)
{

        CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev,
            dev->rdev.adap);
        ib_unregister_device(&dev->ibdev);
        kfree(dev->ibdev.iwcm);
        return;
}
#endif