// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

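/* Configure RX steering for a vPort: install the default RX object and,
 * when an indirection table is supplied, enable RSS with the given
 * table and hash key via a MANA_CONFIG_VPORT_RX request.
 */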
static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
				      struct net_device *ndev,
				      mana_handle_t default_rxobj,
				      mana_handle_t ind_table[],
				      u32 log_ind_tbl_size, u32 rx_hash_key_len,
				      u8 *rx_hash_key)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	struct mana_cfg_rx_steer_req_v2 *req;
	struct mana_cfg_rx_steer_resp resp = {};
	mana_handle_t *req_indir_tab;
	struct gdma_context *gc;
	u32 req_buf_size;
	int i, err;

	gc = mdev_to_gc(dev);

	req_buf_size =
		sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->hdr.req.msg_version = GDMA_MESSAGE_V2;

	req->vport = mpc->port_handle;
	req->rx_enable = 1;
	req->update_default_rxobj = 1;
	req->default_rxobj = default_rxobj;
	req->hdr.dev_id = gc->mana.dev_id;

	/* If there is more than one entry in the indirection table, enable RSS */
	if (log_ind_tbl_size)
		req->rss_enable = true;

	req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
	req->indir_tab_offset = sizeof(*req);
	req->update_indir_tab = true;
	req->cqe_coalescing_enable = 1;

	req_indir_tab = (mana_handle_t *)(req + 1);
	/* The indirection table passed to the hardware must have
	 * MANA_INDIRECT_TABLE_SIZE entries. Replicate the verbs
	 * ind_table entries to fill it if required.
	 */
	ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
		req_indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
		ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
			  req_indir_tab[i]);
	}

	req->update_hashkey = true;
	if (rx_hash_key_len)
		memcpy(req->hashkey, rx_hash_key, rx_hash_key_len);
	else
		netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);

	ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
		  req->vport, default_rxobj);

	err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n",
		    mpc->port_handle, log_ind_tbl_size);

out:
	kfree(req);
	return err;
}

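/* Create an RSS QP: validate the user request, create one hardware RX
 * WQ object per indirection table entry, then program the vPort RX
 * steering to spread traffic across those WQ objects.
 */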
static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(pd->device, struct mana_ib_dev, ib_dev);
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
	struct mana_ib_create_qp_rss_resp resp = {};
	struct mana_ib_create_qp_rss ucmd = {};
	struct gdma_queue **gdma_cq_allocated = NULL;
	mana_handle_t *mana_ind_table;
	struct mana_port_context *mpc;
	unsigned int ind_tbl_size;
	struct net_device *ndev;
	struct mana_ib_cq *cq;
	struct mana_ib_wq *wq;
	struct mana_eq *eq;
	struct ib_cq *ibcq;
	struct ib_wq *ibwq;
	int i = 0;
	u32 port;
	int ret;

	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy from udata for create rss-qp, err %d\n",
			  ret);
		return ret;
	}

	if (attr->cap.max_recv_wr > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_wr %d exceeding limit\n",
			  attr->cap.max_recv_wr);
		return -EINVAL;
	}

	if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_sge %d exceeding limit\n",
			  attr->cap.max_recv_sge);
		return -EINVAL;
	}

	ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
	if (ind_tbl_size > MANA_INDIRECT_TABLE_SIZE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Indirect table size %d exceeding limit\n",
			  ind_tbl_size);
		return -EINVAL;
	}

	if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
		ibdev_dbg(&mdev->ib_dev,
			  "RX Hash function is not supported, %d\n",
			  ucmd.rx_hash_function);
		return -EINVAL;
	}

	/* IB ports start with 1, MANA starts with 0 */
	port = ucmd.port;
	ndev = mana_ib_get_netdev(pd->device, port);
	if (!ndev) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	mpc = netdev_priv(ndev);

	ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
		  ucmd.rx_hash_function, port);

	mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t),
				 GFP_KERNEL);
	if (!mana_ind_table) {
		ret = -ENOMEM;
		goto fail;
	}

	gdma_cq_allocated = kcalloc(ind_tbl_size, sizeof(*gdma_cq_allocated),
				    GFP_KERNEL);
	if (!gdma_cq_allocated) {
		ret = -ENOMEM;
		goto fail;
	}

	qp->port = port;

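	/* For each verbs WQ in the indirection table, create a hardware
	 * RX WQ object with its CQ attached to an EQ.
	 */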
	for (i = 0; i < ind_tbl_size; i++) {
		struct mana_obj_spec wq_spec = {};
		struct mana_obj_spec cq_spec = {};

		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);

		ibcq = ibwq->cq;
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		wq_spec.gdma_region = wq->gdma_region;
		wq_spec.queue_size = wq->wq_buf_size;

		cq_spec.gdma_region = cq->gdma_region;
		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
		cq_spec.modr_ctx_id = 0;
		eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
		cq_spec.attached_eq = eq->eq->id;

		ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
					 &wq_spec, &cq_spec, &wq->rx_object);
		if (ret) {
			/* Do cleanup starting with index i-1 */
			i--;
			goto fail;
		}

		/* The GDMA regions are now owned by the WQ object */
		wq->gdma_region = GDMA_INVALID_DMA_REGION;
		cq->gdma_region = GDMA_INVALID_DMA_REGION;

		wq->id = wq_spec.queue_index;
		cq->id = cq_spec.queue_index;

		ibdev_dbg(&mdev->ib_dev,
			  "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
			  ret, wq->rx_object, wq->id, cq->id);

		resp.entries[i].cqid = cq->id;
		resp.entries[i].wqid = wq->id;

		mana_ind_table[i] = wq->rx_object;

		/* Create CQ table entry */
		ret = mana_ib_install_cq_cb(mdev, cq);
		if (ret)
			goto fail;

		gdma_cq_allocated[i] = gc->cq_table[cq->id];
	}
	resp.num_entries = i;

	ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
					 mana_ind_table,
					 ind_tbl->log_ind_tbl_size,
					 ucmd.rx_hash_key_len,
					 ucmd.rx_hash_key);
	if (ret)
		goto fail;

	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy to udata for create rss-qp, %d\n",
			  ret);
		goto fail;
	}

	kfree(gdma_cq_allocated);
	kfree(mana_ind_table);

	return 0;

fail:
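	/* Unwind in reverse order: remove the CQ table entries and
	 * destroy the WQ objects created so far.
	 */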
	while (i-- > 0) {
		ibwq = ind_tbl->ind_tbl[i];
		ibcq = ibwq->cq;
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		gc->cq_table[cq->id] = NULL;
		kfree(gdma_cq_allocated[i]);

		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	kfree(gdma_cq_allocated);
	kfree(mana_ind_table);

	return ret;
}

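/* Create a raw-packet send QP: configure the vPort for the user
 * doorbell, pin the user SQ buffer, and create the hardware SQ object
 * attached to the send CQ.
 */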
static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_cq *send_cq =
		container_of(attr->send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_ucontext *mana_ucontext =
		rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
					  ibucontext);
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_ib_create_qp_resp resp = {};
	struct mana_ib_create_qp ucmd = {};
	struct gdma_queue *gdma_cq = NULL;
	struct mana_obj_spec wq_spec = {};
	struct mana_obj_spec cq_spec = {};
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct ib_umem *umem;
	struct mana_eq *eq;
	int eq_vec;
	u32 port;
	int err;

	if (!mana_ucontext || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy from udata for create qp-raw, %d\n", err);
		return err;
	}

	if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_wr %d exceeding limit\n",
			  attr->cap.max_send_wr);
		return -EINVAL;
	}

	if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_sge %d exceeding limit\n",
			  attr->cap.max_send_sge);
		return -EINVAL;
	}

	port = ucmd.port;
	ndev = mana_ib_get_netdev(ibpd->device, port);
	if (!ndev) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	mpc = netdev_priv(ndev);
	ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);

	err = mana_ib_cfg_vport(mdev, port, pd, mana_ucontext->doorbell);
	if (err)
		return -ENODEV;

	qp->port = port;

	ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
		  ucmd.sq_buf_addr, ucmd.port);

	umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to get umem for create qp-raw, err %d\n",
			  err);
		goto err_free_vport;
	}
	qp->sq_umem = umem;

	err = mana_ib_create_zero_offset_dma_region(mdev, qp->sq_umem,
						    &qp->sq_gdma_region);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create dma region for create qp-raw, %d\n",
			  err);
		goto err_release_umem;
	}

	ibdev_dbg(&mdev->ib_dev,
		  "create_dma_region ret %d gdma_region 0x%llx\n",
		  err, qp->sq_gdma_region);

	/* Create a WQ on the same port handle used by the Ethernet driver */
	wq_spec.gdma_region = qp->sq_gdma_region;
	wq_spec.queue_size = ucmd.sq_buf_size;

	cq_spec.gdma_region = send_cq->gdma_region;
	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
	cq_spec.modr_ctx_id = 0;
	eq_vec = send_cq->comp_vector % gc->max_num_queues;
	eq = &mpc->ac->eqs[eq_vec];
	cq_spec.attached_eq = eq->eq->id;

	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
				 &cq_spec, &qp->tx_object);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create wq for create raw-qp, err %d\n",
			  err);
		goto err_destroy_dma_region;
	}

	/* The GDMA regions are now owned by the WQ object */
	qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
	send_cq->gdma_region = GDMA_INVALID_DMA_REGION;

	qp->sq_id = wq_spec.queue_index;
	send_cq->id = cq_spec.queue_index;

	/* Create CQ table entry */
	err = mana_ib_install_cq_cb(mdev, send_cq);
	if (err)
		goto err_destroy_wq_obj;

	ibdev_dbg(&mdev->ib_dev,
		  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
		  qp->tx_object, qp->sq_id, send_cq->id);

	resp.sqid = qp->sq_id;
	resp.cqid = send_cq->id;
	resp.tx_vp_offset = pd->tx_vp_offset;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy to udata for create qp-raw, %d\n",
			  err);
		goto err_release_gdma_cq;
	}

	return 0;

err_release_gdma_cq:
	kfree(gdma_cq);
	gc->cq_table[send_cq->id] = NULL;

err_destroy_wq_obj:
	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

err_destroy_dma_region:
	mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);

err_release_umem:
	ib_umem_release(umem);

err_free_vport:
	mana_ib_uncfg_vport(mdev, pd, port);

	return err;
}

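/* Only IB_QPT_RAW_PACKET QPs are supported: with an RWQ indirection
 * table the QP represents the RSS receive side, otherwise it is a raw
 * send QP.
 */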
int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		      struct ib_udata *udata)
{
	switch (attr->qp_type) {
	case IB_QPT_RAW_PACKET:
		/* When rwq_ind_tbl is used, it's for creating WQs for RSS */
		if (attr->rwq_ind_tbl)
			return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr,
						     udata);

		return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
	default:
		/* Creating a QP other than IB_QPT_RAW_PACKET is not supported */
		ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
			  attr->qp_type);
	}

	return -EINVAL;
}

int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	/* modify_qp is not supported by this version of the driver */
	return -EOPNOTSUPP;
}

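/* Tear down an RSS QP by destroying every hardware RX WQ object
 * referenced by the indirection table.
 */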
static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
				  struct ib_rwq_ind_table *ind_tbl,
				  struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_ib_wq *wq;
	struct ib_wq *ibwq;
	int i;

	ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
	mpc = netdev_priv(ndev);

	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
			  wq->rx_object);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	return 0;
}

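/* Tear down a raw-packet QP: destroy the hardware SQ object, release
 * the DMA region and umem, and unconfigure the vPort.
 */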
static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct ib_pd *ibpd = qp->ibqp.pd;
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_ib_pd *pd;

	ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
	mpc = netdev_priv(ndev);
	pd = container_of(ibpd, struct mana_ib_pd, ibpd);

	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

	if (qp->sq_umem) {
		mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
		ib_umem_release(qp->sq_umem);
	}

	mana_ib_uncfg_vport(mdev, pd, qp->port);

	return 0;
}

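/* Dispatch destruction based on how the QP was created. */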
int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);

	switch (ibqp->qp_type) {
	case IB_QPT_RAW_PACKET:
		if (ibqp->rwq_ind_tbl)
			return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl,
						      udata);

		return mana_ib_destroy_qp_raw(qp, udata);

	default:
		ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
			  ibqp->qp_type);
	}

	return -ENOENT;
}