// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2020 Intel Corporation.
 *
 */

/*
 * This file contains HFI1 support for netdev RX functionality
 */

#include "sdma.h"
#include "verbs.h"
#include "netdev.h"
#include "hfi.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>

static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_rx *rx,
				  struct hfi1_ctxtdata *uctxt)
{
	unsigned int rcvctrl_ops;
	struct hfi1_devdata *dd = rx->dd;
	int ret;

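	/*
	 * Use the netdev-specific RHF receive handlers and the NAPI
	 * slow-path handler for interrupts on this context.
	 */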
	uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
	uctxt->do_interrupt = &handle_receive_interrupt_napi_sp;

	/* Now allocate the RcvHdr queue and eager buffers. */
	ret = hfi1_create_rcvhdrq(dd, uctxt);
	if (ret)
		goto done;

	ret = hfi1_setup_eagerbufs(uctxt);
	if (ret)
		goto done;

	clear_rcvhdrtail(uctxt);

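	/*
	 * Program RcvCtxtCtrl with the context and its interrupt left
	 * disabled; hfi1_netdev_enable_queues() turns them on once the
	 * NAPI instances are registered.
	 */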
	rcvctrl_ops = HFI1_RCVCTRL_CTXT_DIS;
	rcvctrl_ops |= HFI1_RCVCTRL_INTRAVAIL_DIS;

	if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;

	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
done:
	return ret;
}

static int hfi1_netdev_allocate_ctxt(struct hfi1_devdata *dd,
				     struct hfi1_ctxtdata **ctxt)
{
	struct hfi1_ctxtdata *uctxt;
	int ret;

	if (dd->flags & HFI1_FROZEN)
		return -EIO;

	ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
	if (ret < 0) {
		dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
		return -ENOMEM;
	}

	uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);
	/* Netdev contexts are always NO_RDMA_RTAIL */
	uctxt->fast_handler = handle_receive_interrupt_napi_fp;
	uctxt->slow_handler = handle_receive_interrupt_napi_sp;
	hfi1_set_seq_cnt(uctxt, 1);
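	/* Mark this as a netdev (VNIC) context rather than a user context. */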
	uctxt->is_vnic = true;

	hfi1_stats.sps_ctxts++;

	dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);
	*ctxt = uctxt;

	return 0;
}

static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
					struct hfi1_ctxtdata *uctxt)
{
	flush_wc();

	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);

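	/*
	 * CCE_NUM_MSIX_VECTORS doubles as the "no vector assigned" sentinel;
	 * only free an IRQ that was actually requested.
	 */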
	if (uctxt->msix_intr != CCE_NUM_MSIX_VECTORS)
		msix_free_irq(dd, uctxt->msix_intr);

	uctxt->msix_intr = CCE_NUM_MSIX_VECTORS;
	uctxt->event_flags = 0;

	hfi1_clear_tids(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt);

	hfi1_stats.sps_ctxts--;

	hfi1_free_ctxt(uctxt);
}

static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_rx *rx,
				  struct hfi1_ctxtdata **ctxt)
{
	int rc;
	struct hfi1_devdata *dd = rx->dd;

	rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
	if (rc) {
		dd_dev_err(dd, "netdev ctxt alloc failed %d\n", rc);
		return rc;
	}

	rc = hfi1_netdev_setup_ctxt(rx, *ctxt);
	if (rc) {
		dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
		hfi1_netdev_deallocate_ctxt(dd, *ctxt);
		*ctxt = NULL;
	}

	return rc;
}

/**
 * hfi1_num_netdev_contexts - Count of netdev recv contexts to use.
 * @dd: device on which to allocate netdev contexts
 * @available_contexts: count of available receive contexts
 * @cpu_mask: mask of possible cpus to include for contexts
 *
 * Return: the minimum of the number of CPUs from @cpu_mask on the
 * device's NUMA node, @available_contexts, and HFI1_MAX_NETDEV_CTXTS.
 * A value of 0 is returned when acceleration is explicitly turned off,
 * a memory allocation error occurs, or there are no available contexts.
 */
u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
			     struct cpumask *cpu_mask)
{
	cpumask_var_t node_cpu_mask;
	unsigned int available_cpus;

	if (!HFI1_CAP_IS_KSET(AIP))
		return 0;

	/* Always give user contexts priority over netdev contexts */
	if (available_contexts == 0) {
		dd_dev_info(dd, "No receive contexts available for netdevs.\n");
		return 0;
	}

	if (!zalloc_cpumask_var(&node_cpu_mask, GFP_KERNEL)) {
		dd_dev_err(dd, "Unable to allocate cpu_mask for netdevs.\n");
		return 0;
	}

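	/* Restrict the candidate CPUs to those on the device's NUMA node. */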
	cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));

	available_cpus = cpumask_weight(node_cpu_mask);

	free_cpumask_var(node_cpu_mask);

	return min3(available_cpus, available_contexts,
		    (u32)HFI1_MAX_NETDEV_CTXTS);
}

static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx)
{
	int i;
	int rc;
	struct hfi1_devdata *dd = rx->dd;
	struct net_device *dev = &rx->rx_napi;

	rx->num_rx_q = dd->num_netdev_contexts;
	rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),
			       GFP_KERNEL, dd->node);

	if (!rx->rxq) {
		dd_dev_err(dd, "Unable to allocate netdev queue data\n");
		return -ENOMEM;
	}

	for (i = 0; i < rx->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd);
		if (rc)
			goto bail_context_irq_failure;

		hfi1_rcd_get(rxq->rcd);
		rxq->rx = rx;
		rxq->rcd->napi = &rxq->napi;
		dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
			    i, rxq->rcd->ctxt);
		/*
		 * Disable BUSY_POLL on this NAPI as this is not supported
		 * right now.
		 */
		set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
		netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi);
		rc = msix_netdev_request_rcd_irq(rxq->rcd);
		if (rc)
			goto bail_context_irq_failure;
	}

	return 0;

bail_context_irq_failure:
	dd_dev_err(dd, "Unable to allot receive context\n");
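	/*
	 * Unwind from the failing index downward; rxq->rcd is NULL when the
	 * context allotment itself failed, so only successfully allotted
	 * contexts are torn down.
	 */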
	for (; i >= 0; i--) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		if (rxq->rcd) {
			hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
			hfi1_rcd_put(rxq->rcd);
			rxq->rcd = NULL;
		}
	}
	kfree(rx->rxq);
	rx->rxq = NULL;

	return rc;
}

static void hfi1_netdev_rxq_deinit(struct hfi1_netdev_rx *rx)
{
	int i;
	struct hfi1_devdata *dd = rx->dd;

	for (i = 0; i < rx->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		netif_napi_del(&rxq->napi);
		hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
		hfi1_rcd_put(rxq->rcd);
		rxq->rcd = NULL;
	}

	kfree(rx->rxq);
	rx->rxq = NULL;
	rx->num_rx_q = 0;
}

static void enable_queues(struct hfi1_netdev_rx *rx)
{
	int i;

	for (i = 0; i < rx->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i,
			    rxq->rcd->ctxt);
		napi_enable(&rxq->napi);
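		/* NAPI can now run; enable the context and its interrupt. */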
		hfi1_rcvctrl(rx->dd,
			     HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
			     rxq->rcd);
	}
}

static void disable_queues(struct hfi1_netdev_rx *rx)
{
	int i;

	msix_netdev_synchronize_irq(rx->dd);

	for (i = 0; i < rx->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i,
			    rxq->rcd->ctxt);

		/* wait for napi if it was scheduled */
		hfi1_rcvctrl(rx->dd,
			     HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
			     rxq->rcd);
		napi_synchronize(&rxq->napi);
		napi_disable(&rxq->napi);
	}
}

/**
 * hfi1_netdev_rx_init - Increments the netdevs counter. On the first
 * call it allocates the receive queue data and calls netif_napi_add
 * for each queue.
 *
 * @dd: hfi1 dev data
 *
 * Return: 0 on success, negative errno on failure.
 */
int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;
	int res;

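	/*
	 * Only the first netdev coming up allocates the shared receive
	 * queues; later callers just bump the reference count.
	 */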
	if (atomic_fetch_inc(&rx->netdevs))
		return 0;

	mutex_lock(&hfi1_mutex);
	res = hfi1_netdev_rxq_init(rx);
	mutex_unlock(&hfi1_mutex);
	return res;
}

/**
 * hfi1_netdev_rx_destroy - Decrements the netdevs counter; when it
 * reaches 0 the NAPI instances are deleted and the receive queue
 * memory is freed.
 *
 * @dd: hfi1 dev data
 *
 * Return: always 0.
 */
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

	/* destroy the RX queues only if it is the last netdev going away */
	if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) {
		mutex_lock(&hfi1_mutex);
		hfi1_netdev_rxq_deinit(rx);
		mutex_unlock(&hfi1_mutex);
	}

	return 0;
}

/**
 * hfi1_alloc_rx - Allocates the rx support structure
 * @dd: hfi1 dev data
 *
 * Allocate the rx structure to support gathering the receive
 * resources and the dummy netdev.
 *
 * Updates the dd struct pointer upon success.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int hfi1_alloc_rx(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx;

	dd_dev_info(dd, "allocating rx size %zu\n", sizeof(*rx));
	rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node);

	if (!rx)
		return -ENOMEM;
	rx->dd = dd;
	init_dummy_netdev(&rx->rx_napi);

	xa_init(&rx->dev_tbl);
	atomic_set(&rx->enabled, 0);
	atomic_set(&rx->netdevs, 0);
	dd->netdev_rx = rx;

	return 0;
}

void hfi1_free_rx(struct hfi1_devdata *dd)
{
	if (dd->netdev_rx) {
		dd_dev_info(dd, "hfi1 rx freed\n");
		kfree(dd->netdev_rx);
		dd->netdev_rx = NULL;
	}
}

/**
 * hfi1_netdev_enable_queues - Enable the NAPI instances associated
 * with the receive queues. Each call increments an atomic counter;
 * the queues are enabled only on the first call.
 * hfi1_netdev_disable_queues() decrements the counter and disables
 * the queues once it drops back to 0.
 *
 * @dd: hfi1 dev data
 */
void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx;

	if (!dd->netdev_rx)
		return;

	rx = dd->netdev_rx;
	if (atomic_fetch_inc(&rx->enabled))
		return;

	mutex_lock(&hfi1_mutex);
	enable_queues(rx);
	mutex_unlock(&hfi1_mutex);
}

void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx;

	if (!dd->netdev_rx)
		return;

	rx = dd->netdev_rx;
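	/*
	 * atomic_dec_if_positive() leaves a counter already at 0 untouched;
	 * only the call that drops it to 0 actually quiesces the queues.
	 */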
	if (atomic_dec_if_positive(&rx->enabled))
		return;

	mutex_lock(&hfi1_mutex);
	disable_queues(rx);
	mutex_unlock(&hfi1_mutex);
}

/**
 * hfi1_netdev_add_data - Registers data under a unique identifier so
 * it can be requested later; this is needed for the VNIC and IPoIB
 * VLAN implementations.
 * The backing xarray provides its own internal locking.
 *
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 * @data: data to be associated with index
 *
 * Return: 0 on success, -EBUSY if @id is already in use, or -ENOMEM on
 * allocation failure.
 */
int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

	return xa_insert(&rx->dev_tbl, id, data, GFP_NOWAIT);
}
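
/*
 * Illustrative usage (identifiers hypothetical): a client such as the
 * VNIC code can register its per-device state and look it up again on
 * the receive path, e.g.:
 *
 *	rc = hfi1_netdev_add_data(dd, client_id, client_data);
 *	...
 *	client_data = hfi1_netdev_get_data(dd, client_id);
 */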

/**
 * hfi1_netdev_remove_data - Removes the data registered under the
 * given id and returns it.
 *
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 *
 * Return: the removed entry, or NULL if nothing was registered at @id.
 */
void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

	return xa_erase(&rx->dev_tbl, id);
}

/**
 * hfi1_netdev_get_data - Gets data with given id
 *
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 */
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

	return xa_load(&rx->dev_tbl, id);
}

/**
 * hfi1_netdev_get_first_data - Gets the first entry with an id greater
 * than or equal to *start_id; *start_id is updated to the id found.
 *
 * @dd: hfi1 dev data
 * @start_id: requested integer id up to INT_MAX
 *
 * Return: the entry found, or NULL if none is present.
 */
void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;
	unsigned long index = *start_id;
	void *ret;

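	/*
	 * xa_find() advances @index to the first present entry at or above
	 * *start_id; write it back so callers can iterate.
	 */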
	ret = xa_find(&rx->dev_tbl, &index, UINT_MAX, XA_PRESENT);
	*start_id = (int)index;
	return ret;
}