/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/iw_cxgbe/device.c 331769 2018-03-30 18:06:29Z hselasky $");

#include "opt_inet.h"

#include <sys/ktr.h>

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <rdma/ib_verbs.h>
#include <linux/idr.h>

#ifdef TCP_OFFLOAD
#include "iw_cxgbe.h"

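/*
 * Free the qpid and cqid lists cached on a user context.  A qpid whose
 * qpmask bits are all clear is the head of a block allocated from the
 * qid table and is returned to it; all other cached entries are simply
 * freed.
 */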
void
c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
    struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

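/*
 * Prepare a user context for use: empty qpid/cqid caches and the mutex
 * that protects them.
 */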
void
c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{

	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

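/*
 * Open the rdev: validate the qid layout reported by the adapter, derive
 * the qp/cq shift and mask values, set up the resource allocators (qids,
 * PDs, stags, PBL and RQT pools), allocate the status page advertised to
 * user space, and create the "iw_cxgb4_free" workqueue.
 */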
static int
c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	struct adapter *sc = rdev->adap;
	struct sge_params *sp = &sc->params.sge;
	int rc;
	unsigned short ucq_density = 1 << sp->iq_s_qpp; /* # of user CQs/page */
	unsigned short udb_density = 1 << sp->eq_s_qpp; /* # of user DB/page */

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * This implementation assumes udb_density == ucq_density!  Eventually
	 * we might need to support this but for now fail the open. Also the
	 * cqid and qpid range must match for now.
	 */
	if (udb_density != ucq_density) {
		device_printf(sc->dev, "unsupported udb/ucq densities %u/%u\n",
		    udb_density, ucq_density);
		rc = -EINVAL;
		goto err1;
	}
	if (sc->vres.qp.start != sc->vres.cq.start ||
	    sc->vres.qp.size != sc->vres.cq.size) {
		device_printf(sc->dev, "%s: unsupported qp and cq id ranges "
			"qp start %u size %u cq start %u size %u\n", __func__,
			sc->vres.qp.start, sc->vres.qp.size, sc->vres.cq.start,
			sc->vres.cq.size);
		rc = -EINVAL;
		goto err1;
	}

	rdev->qpshift = PAGE_SHIFT - sp->eq_s_qpp;
	rdev->qpmask = udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - sp->iq_s_qpp;
	rdev->cqmask = ucq_density - 1;
	CTR5(KTR_IW_CXGBE, "%s dev %s stag start 0x%0x size 0x%0x num stags %d",
		__func__, device_get_nameunit(sc->dev), sc->vres.stag.start,
		sc->vres.stag.size, c4iw_num_stags(rdev));
	CTR5(KTR_IW_CXGBE, "%s pbl start 0x%0x size 0x%0x"
			" rq start 0x%0x size 0x%0x", __func__,
			sc->vres.pbl.start, sc->vres.pbl.size,
			sc->vres.rq.start, sc->vres.rq.size);
	CTR5(KTR_IW_CXGBE, "%s:qp qid start %u size %u cq qid start %u size %u",
			 __func__, sc->vres.qp.start, sc->vres.qp.size,
			 sc->vres.cq.start, sc->vres.cq.size);
	/* TODO
	CTR5(KTR_IW_CXGBE, "%s udb %pR db_reg %p gts_reg %p"
			"qpmask 0x%x cqmask 0x%x", __func__,
			db_reg, gts_reg, rdev->qpmask, rdev->cqmask);
			*/

	if (c4iw_num_stags(rdev) == 0) {
		rc = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = sc->vres.stag.size;
	rdev->stats.pbl.total = sc->vres.pbl.size;
	rdev->stats.rqt.total = sc->vres.rq.size;
	rdev->stats.qid.total = sc->vres.qp.size;

	rc = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (rc) {
		device_printf(sc->dev, "error %d initializing resources\n", rc);
		goto err1;
	}
	rc = c4iw_pblpool_create(rdev);
	if (rc) {
		device_printf(sc->dev, "error %d initializing pbl pool\n", rc);
		goto err2;
	}
	rc = c4iw_rqtpool_create(rdev);
	if (rc) {
		device_printf(sc->dev, "error %d initializing rqt pool\n", rc);
		goto err3;
	}
	rdev->status_page = (struct t4_dev_status_page *)
				__get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		rc = -ENOMEM;
		goto err4;
	}
	rdev->status_page->qp_start = sc->vres.qp.start;
	rdev->status_page->qp_size = sc->vres.qp.size;
	rdev->status_page->cq_start = sc->vres.cq.start;
	rdev->status_page->cq_size = sc->vres.cq.size;

	/*
	 * T5 and above devices don't need Doorbell recovery logic, so db_off
	 * is always set to '0'.
	 */
	rdev->status_page->db_off = 0;

	rdev->status_page->wc_supported = rdev->adap->iwt.wc_en;

	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
	if (!rdev->free_workq) {
		rc = -ENOMEM;
		goto err5;
	}
	return (0);
err5:
	free_page((unsigned long)rdev->status_page);
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return (rc);
}

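/*
 * Undo c4iw_rdev_open: release the resources acquired when the rdev was
 * opened.
 */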
static void
c4iw_rdev_close(struct c4iw_rdev *rdev)
{
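	/*
	 * Added here on the assumption that the linuxkpi destroy_workqueue()
	 * is available in this tree: reclaim the workqueue created in
	 * c4iw_rdev_open(), which this close path otherwise leaks.
	 */
	destroy_workqueue(rdev->free_workq);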
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

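/*
 * Tear down a c4iw_dev: close the rdev, destroy the idr tables, and
 * release the ib_device allocation.
 */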
static void
c4iw_dealloc(struct c4iw_dev *iwsc)
{

	c4iw_rdev_close(&iwsc->rdev);
	idr_destroy(&iwsc->cqidr);
	idr_destroy(&iwsc->qpidr);
	idr_destroy(&iwsc->mmidr);
	ib_dealloc_device(&iwsc->ibdev);
}

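/*
 * Allocate and set up the iWARP softc for an adapter: derive the hardware
 * queue limits from the SGE parameters, map all of BAR2 for user and
 * kernel doorbells, open the rdev, and initialize the cq/qp/mm idr tables.
 * Returns an ERR_PTR on failure.
 */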
static struct c4iw_dev *
c4iw_alloc(struct adapter *sc)
{
	struct c4iw_dev *iwsc;
	int rc;

	iwsc = (struct c4iw_dev *)ib_alloc_device(sizeof(*iwsc));
	if (iwsc == NULL) {
		device_printf(sc->dev, "Cannot allocate ib device.\n");
		return (ERR_PTR(-ENOMEM));
	}
	iwsc->rdev.adap = sc;

	/* init various hw-queue params based on lld info */
	CTR3(KTR_IW_CXGBE, "%s: Ing. padding boundary is %d, "
			"egrsstatuspagesize = %d", __func__,
			sc->params.sge.pad_boundary,
			sc->params.sge.spg_len);

	iwsc->rdev.hw_queue.t4_eq_status_entries =
		sc->params.sge.spg_len / EQ_ESIZE;
	iwsc->rdev.hw_queue.t4_max_eq_size = 65520;
	iwsc->rdev.hw_queue.t4_max_iq_size = 65520;
	iwsc->rdev.hw_queue.t4_max_rq_size = 8192 -
		iwsc->rdev.hw_queue.t4_eq_status_entries - 1;
	iwsc->rdev.hw_queue.t4_max_sq_size =
		iwsc->rdev.hw_queue.t4_max_eq_size -
		iwsc->rdev.hw_queue.t4_eq_status_entries - 1;
	iwsc->rdev.hw_queue.t4_max_qp_depth =
		iwsc->rdev.hw_queue.t4_max_rq_size;
	iwsc->rdev.hw_queue.t4_max_cq_depth =
		iwsc->rdev.hw_queue.t4_max_iq_size - 2;
	iwsc->rdev.hw_queue.t4_stat_len = iwsc->rdev.adap->params.sge.spg_len;

	/*
	 * As T5 and above devices support BAR2 kernel doorbells & WC, we map
	 * all of BAR2, for both User and Kernel Doorbells-GTS.
	 */
	iwsc->rdev.bar2_kva = (void __iomem *)((u64)iwsc->rdev.adap->udbs_base);
	iwsc->rdev.bar2_pa = vtophys(iwsc->rdev.adap->udbs_base);
	iwsc->rdev.bar2_len = rman_get_size(iwsc->rdev.adap->udbs_res);

	rc = c4iw_rdev_open(&iwsc->rdev);
	if (rc != 0) {
		device_printf(sc->dev, "Unable to open CXIO rdev (%d)\n", rc);
		ib_dealloc_device(&iwsc->ibdev);
		return (ERR_PTR(rc));
	}

	idr_init(&iwsc->cqidr);
	idr_init(&iwsc->qpidr);
	idr_init(&iwsc->mmidr);
	spin_lock_init(&iwsc->lock);
	mutex_init(&iwsc->rdev.stats.lock);
	iwsc->avail_ird = iwsc->rdev.adap->params.max_ird_adapter;

	return (iwsc);
}

static int c4iw_mod_load(void);
static int c4iw_mod_unload(void);
static int c4iw_activate(struct adapter *);
static int c4iw_deactivate(struct adapter *);

static struct uld_info c4iw_uld_info = {
	.uld_id = ULD_IWARP,
	.activate = c4iw_activate,
	.deactivate = c4iw_deactivate,
};

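/*
 * Activate iWARP on an adapter.  Runs as a synchronized operation via the
 * ULD framework; T4 devices and adapters without RDMA capabilities are
 * rejected, otherwise the softc is allocated and the ib device registered.
 */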
static int
c4iw_activate(struct adapter *sc)
{
	struct c4iw_dev *iwsc;
	int rc;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (is_t4(sc)) {
		device_printf(sc->dev, "No iWARP support for T4 devices; "
				"install a T5 or newer adapter.\n");
		return (ENOSYS);
	}

	if (uld_active(sc, ULD_IWARP)) {
		KASSERT(0, ("%s: RDMA already enabled on sc %p", __func__, sc));
		return (0);
	}

	if (sc->rdmacaps == 0) {
		device_printf(sc->dev,
		    "RDMA not supported or RDMA cap is not enabled.\n");
		return (ENOSYS);
	}

	iwsc = c4iw_alloc(sc);
	if (IS_ERR(iwsc)) {
		rc = -PTR_ERR(iwsc);
		device_printf(sc->dev, "initialization failed: %d\n", rc);
		return (rc);
	}

	sc->iwarp_softc = iwsc;

	rc = -c4iw_register_device(iwsc);
	if (rc) {
		device_printf(sc->dev, "RDMA registration failed: %d\n", rc);
		c4iw_dealloc(iwsc);
		sc->iwarp_softc = NULL;
	}

	return (rc);
}

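/*
 * Deactivate iWARP on an adapter: unregister the ib device and release
 * the softc.  Also runs as a synchronized operation.
 */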
static int
c4iw_deactivate(struct adapter *sc)
{
	struct c4iw_dev *iwsc = sc->iwarp_softc;

	ASSERT_SYNCHRONIZED_OP(sc);

	c4iw_unregister_device(iwsc);
	c4iw_dealloc(iwsc);
	sc->iwarp_softc = NULL;

	return (0);
}

static void
c4iw_activate_all(struct adapter *sc, void *arg __unused)
{

	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4iwact") != 0)
		return;

	/* Activate iWARP if any port on this adapter has IFCAP_TOE enabled. */
	if (sc->offload_map && !uld_active(sc, ULD_IWARP))
		(void) t4_activate_uld(sc, ULD_IWARP);

	end_synchronized_op(sc, 0);
}

static void
c4iw_deactivate_all(struct adapter *sc, void *arg __unused)
{

	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4iwdea") != 0)
		return;

	if (uld_active(sc, ULD_IWARP))
		(void) t4_deactivate_uld(sc, ULD_IWARP);

	end_synchronized_op(sc, 0);
}

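/*
 * Module load: initialize the connection manager, register with the ULD
 * framework, and activate iWARP on all adapters that already have offload
 * enabled.
 */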
static int
c4iw_mod_load(void)
{
	int rc;

	rc = -c4iw_cm_init();
	if (rc != 0)
		return (rc);

	rc = t4_register_uld(&c4iw_uld_info);
	if (rc != 0) {
		c4iw_cm_term();
		return (rc);
	}

	t4_iterate(c4iw_activate_all, NULL);

	return (rc);
}

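/*
 * Module unload: deactivate iWARP on all adapters, shut down the
 * connection manager, and unregister from the ULD framework.  Fails with
 * EBUSY if the ULD is still in use.
 */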
static int
c4iw_mod_unload(void)
{

	t4_iterate(c4iw_deactivate_all, NULL);

	c4iw_cm_term();

	if (t4_unregister_uld(&c4iw_uld_info) == EBUSY)
		return (EBUSY);

	return (0);
}

#endif

/*
 * t4_tom won't load on kernels without TCP_OFFLOAD and this module's dependency
 * on t4_tom ensures that it won't either.  So we don't directly check for
 * TCP_OFFLOAD here.
 */
static int
c4iw_modevent(module_t mod, int cmd, void *arg)
{
	int rc = 0;

#ifdef TCP_OFFLOAD
	switch (cmd) {
	case MOD_LOAD:
		rc = c4iw_mod_load();
		if (rc == 0)
			printf("iw_cxgbe: Chelsio T5/T6 RDMA driver loaded.\n");
		break;

	case MOD_UNLOAD:
		rc = c4iw_mod_unload();
		break;

	default:
		rc = EINVAL;
	}
#else
	printf("iw_cxgbe: compiled without TCP_OFFLOAD support.\n");
	rc = EOPNOTSUPP;
#endif
	return (rc);
}

static moduledata_t c4iw_mod_data = {
	"iw_cxgbe",
	c4iw_modevent,
	0
};

MODULE_VERSION(iw_cxgbe, 1);
MODULE_DEPEND(iw_cxgbe, t4nex, 1, 1, 1);
MODULE_DEPEND(iw_cxgbe, t4_tom, 1, 1, 1);
MODULE_DEPEND(iw_cxgbe, ibcore, 1, 1, 1);
MODULE_DEPEND(iw_cxgbe, linuxkpi, 1, 1, 1);
DECLARE_MODULE(iw_cxgbe, c4iw_mod_data, SI_SUB_EXEC, SI_ORDER_ANY);