/******************************************************************************

  Copyright (c) 2013-2019, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/11/sys/dev/ixl/ixl_iw.c 349163 2019-06-18 00:08:02Z erj $*/

#include "ixl.h"
#include "ixl_pf.h"
#include "ixl_iw.h"
#include "ixl_iw_int.h"

#ifdef	IXL_IW

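/*
 * MSI-X vectors reserved for iWARP occupy the tail of the PF's vector
 * range: [msix - iw_msix, msix).
 */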
#define IXL_IW_VEC_BASE(pf)	((pf)->msix - (pf)->iw_msix)
#define IXL_IW_VEC_COUNT(pf)	((pf)->iw_msix)
#define IXL_IW_VEC_LIMIT(pf)	((pf)->msix)

extern int ixl_enable_iwarp;

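/* Module-global iwarp client state and the count of attached PFs. */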
static struct ixl_iw_state ixl_iw;
static int ixl_iw_ref_cnt;

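/*
 * Detach every iWARP MSI-X vector from its interrupt linked list,
 * leaving the vectors with no queues attached.
 */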
static void
ixl_iw_pf_msix_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;
	int vec;

	for (vec = IXL_IW_VEC_BASE(pf); vec < IXL_IW_VEC_LIMIT(pf); vec++) {
		reg = I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK;
		wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);
	}

	return;
}

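/*
 * Taskqueue handler; brings the actual iwarp state of a PF in line with
 * the scheduled one by calling the registered init or stop callback.
 */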
static void
ixl_iw_invoke_op(void *context, int pending)
{
	struct ixl_iw_pf_entry *pf_entry = (struct ixl_iw_pf_entry *)context;
	struct ixl_iw_pf info;
	bool initialize;
	int err;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);
	if ((pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) &&
	    (pf_entry->state.iw_current == IXL_IW_PF_STATE_OFF))
		initialize = true;
	else if ((pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_OFF) &&
	         (pf_entry->state.iw_current == IXL_IW_PF_STATE_ON))
		initialize = false;
	else {
		/* nothing to be done, so finish here */
		mtx_unlock(&ixl_iw.mtx);
		return;
	}
	info = pf_entry->pf_info;
	mtx_unlock(&ixl_iw.mtx);

	if (initialize) {
		err = ixl_iw.ops->init(&info);
		if (err)
			device_printf(pf_entry->pf->dev,
				"%s: failed to initialize iwarp (err %d)\n",
				__func__, err);
		else
			pf_entry->state.iw_current = IXL_IW_PF_STATE_ON;
	} else {
		err = ixl_iw.ops->stop(&info);
		if (err)
			device_printf(pf_entry->pf->dev,
				"%s: failed to stop iwarp (err %d)\n",
				__func__, err);
		else {
			ixl_iw_pf_msix_reset(pf_entry->pf);
			pf_entry->state.iw_current = IXL_IW_PF_STATE_OFF;
		}
	}
	return;
}

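/* Tear down the module-global state once the last PF has detached. */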
static void
ixl_iw_uninit(void)
{
	INIT_DEBUGOUT("begin");

	mtx_destroy(&ixl_iw.mtx);

	return;
}

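/* Set up the module-global state when the first PF attaches. */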
static void
ixl_iw_init(void)
{
	INIT_DEBUGOUT("begin");

	LIST_INIT(&ixl_iw.pfs);
	mtx_init(&ixl_iw.mtx, "ixl_iw_pfs", NULL, MTX_DEF);
	ixl_iw.registered = false;

	return;
}

/******************************************************************************
 * if_ixl internal API
 *****************************************************************************/

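/*
 * Called when the PF is brought up: fill in the PF description passed to
 * the iwarp driver and, if one is registered, schedule iwarp
 * initialization on this PF.
 */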
int
ixl_iw_pf_init(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;
	struct ixl_iw_pf *pf_info;
	int err = 0;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf)
			break;
	if (pf_entry == NULL) {
		/* attempt to initialize a PF that is not yet attached - something is wrong */
		device_printf(pf->dev, "%s: PF not found\n", __func__);
		err = ENOENT;
		goto out;
	}

	pf_info = &pf_entry->pf_info;

	pf_info->handle	= (void *)pf;

	pf_info->ifp		= pf->vsi.ifp;
	pf_info->dev		= pf->dev;
	pf_info->pci_mem	= pf->pci_mem;
	pf_info->pf_id		= pf->hw.pf_id;
	pf_info->mtu		= pf->vsi.ifp->if_mtu;

	pf_info->iw_msix.count	= IXL_IW_VEC_COUNT(pf);
	pf_info->iw_msix.base	= IXL_IW_VEC_BASE(pf);

	for (int i = 0; i < IXL_IW_MAX_USER_PRIORITY; i++)
		pf_info->qs_handle[i] = le16_to_cpu(pf->vsi.info.qs_handle[0]);

	pf_entry->state.pf = IXL_IW_PF_STATE_ON;
	if (ixl_iw.registered) {
		pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON;
		taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
	}

out:
	mtx_unlock(&ixl_iw.mtx);

	return (err);
}

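/*
 * Called when the PF goes down: if iwarp is scheduled to run on this PF,
 * schedule its shutdown.
 */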
void
ixl_iw_pf_stop(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf)
			break;
	if (pf_entry == NULL) {
		/* attempt to stop a PF that was never attached - something is wrong */
		device_printf(pf->dev, "%s: PF not found\n", __func__);
		goto out;
	}

	pf_entry->state.pf = IXL_IW_PF_STATE_OFF;
	if (pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) {
		pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
		if (ixl_iw.registered)
			taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
	}

out:
	mtx_unlock(&ixl_iw.mtx);

	return;
}

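/*
 * Add a PF to the list of ports available to the iwarp driver.  The
 * first attach also initializes the module-global state.
 */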
int
ixl_iw_pf_attach(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;
	int err = 0;

	INIT_DEBUGOUT("begin");

	if (ixl_iw_ref_cnt == 0)
		ixl_iw_init();

	mtx_lock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf) {
			device_printf(pf->dev, "%s: PF already exists\n",
			    __func__);
			err = EEXIST;
			goto out;
		}

	pf_entry = malloc(sizeof(struct ixl_iw_pf_entry),
			M_DEVBUF, M_NOWAIT | M_ZERO);
	if (pf_entry == NULL) {
		device_printf(pf->dev,
		    "%s: failed to allocate memory to attach new PF\n",
		    __func__);
		err = ENOMEM;
		goto out;
	}
	pf_entry->pf = pf;
	pf_entry->state.pf		= IXL_IW_PF_STATE_OFF;
	pf_entry->state.iw_scheduled	= IXL_IW_PF_STATE_OFF;
	pf_entry->state.iw_current	= IXL_IW_PF_STATE_OFF;

	LIST_INSERT_HEAD(&ixl_iw.pfs, pf_entry, node);
	ixl_iw_ref_cnt++;

	TASK_INIT(&pf_entry->iw_task, 0, ixl_iw_invoke_op, pf_entry);
out:
	mtx_unlock(&ixl_iw.mtx);

	return (err);
}

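/*
 * Remove a stopped PF from the list.  The last detach also tears down
 * the module-global state.
 */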
int
ixl_iw_pf_detach(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;
	int err = 0;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf)
			break;
	if (pf_entry == NULL) {
		/* attempt to detach a PF that was never attached - something is wrong */
		device_printf(pf->dev, "%s: PF not found\n", __func__);
		err = ENOENT;
		goto out;
	}

	if (pf_entry->state.pf != IXL_IW_PF_STATE_OFF) {
		/* attempt to detach a PF that has not been stopped - something is wrong */
		device_printf(pf->dev, "%s: failed - PF is still active\n",
		    __func__);
		err = EBUSY;
		goto out;
	}
	LIST_REMOVE(pf_entry, node);
	free(pf_entry, M_DEVBUF);
	ixl_iw_ref_cnt--;

out:
	mtx_unlock(&ixl_iw.mtx);

	if (ixl_iw_ref_cnt == 0)
		ixl_iw_uninit();

	return (err);
}


/******************************************************************************
 * API exposed to iw_ixl module
 *****************************************************************************/

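/* Re-initialize the PF on behalf of the iwarp driver. */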
int
ixl_iw_pf_reset(void *pf_handle)
{
	struct ixl_pf *pf = (struct ixl_pf *)pf_handle;

	INIT_DEBUGOUT("begin");

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);

	return (0);
}

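/*
 * Program the AEQ and CEQ interrupt cause registers with the MSI-X
 * mapping requested by the iwarp driver.  Reserved vectors with no CEQ
 * mapped to them get an empty interrupt linked list.
 */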
int
ixl_iw_pf_msix_init(void *pf_handle,
	struct ixl_iw_msix_mapping *msix_info)
{
	struct ixl_pf *pf = (struct ixl_pf *)pf_handle;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;
	int vec, i;

	INIT_DEBUGOUT("begin");

	if ((msix_info->aeq_vector < IXL_IW_VEC_BASE(pf)) ||
	    (msix_info->aeq_vector >= IXL_IW_VEC_LIMIT(pf))) {
		printf("%s: invalid MSIX vector (%i) for AEQ\n",
		    __func__, msix_info->aeq_vector);
		return (EINVAL);
	}
	reg = I40E_PFINT_AEQCTL_CAUSE_ENA_MASK |
		(msix_info->aeq_vector << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) |
		(msix_info->itr_indx << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_AEQCTL, reg);

	for (vec = IXL_IW_VEC_BASE(pf); vec < IXL_IW_VEC_LIMIT(pf); vec++) {
		for (i = 0; i < msix_info->ceq_cnt; i++)
			if (msix_info->ceq_vector[i] == vec)
				break;
		if (i == msix_info->ceq_cnt) {
			/* this vector has no CEQ mapped */
			reg = I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK;
			wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);
		} else {
			reg = (i & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			    (I40E_QUEUE_TYPE_PE_CEQ <<
			    I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);

			reg = I40E_PFINT_CEQCTL_CAUSE_ENA_MASK |
			    (vec << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) |
			    (msix_info->itr_indx <<
			    I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) |
			    (IXL_QUEUE_EOL <<
			    I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT);
			wr32(hw, I40E_PFINT_CEQCTL(i), reg);
		}
	}

	return (0);
}

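/*
 * Register the iwarp driver: store its init/stop callbacks, create the
 * taskqueue and schedule iwarp initialization on every active PF.
 */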
int
ixl_iw_register(struct ixl_iw_ops *ops)
{
	struct ixl_iw_pf_entry *pf_entry;
	int err = 0;
	int iwarp_cap_on_pfs = 0;

	INIT_DEBUGOUT("begin");
	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		iwarp_cap_on_pfs += pf_entry->pf->hw.func_caps.iwarp;
	if (!iwarp_cap_on_pfs && ixl_enable_iwarp) {
		printf("%s: the device is not iwarp-capable, registration dropped\n",
		    __func__);
		return (ENODEV);
	}
	if (ixl_enable_iwarp == 0) {
		printf("%s: enable_iwarp is off, registration dropped\n",
		    __func__);
		return (EACCES);
	}

	if ((ops->init == NULL) || (ops->stop == NULL)) {
		printf("%s: invalid iwarp driver ops\n", __func__);
		return (EINVAL);
	}

	mtx_lock(&ixl_iw.mtx);
	if (ixl_iw.registered) {
		printf("%s: iwarp driver already registered\n", __func__);
		err = EBUSY;
		goto out;
	}
	ixl_iw.registered = true;
	mtx_unlock(&ixl_iw.mtx);

	ixl_iw.tq = taskqueue_create("ixl_iw", M_NOWAIT,
		taskqueue_thread_enqueue, &ixl_iw.tq);
	if (ixl_iw.tq == NULL) {
		printf("%s: failed to create queue\n", __func__);
		ixl_iw.registered = false;
		return (ENOMEM);
	}
	taskqueue_start_threads(&ixl_iw.tq, 1, PI_NET, "ixl iw");

	ixl_iw.ops = malloc(sizeof(struct ixl_iw_ops),
			M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ixl_iw.ops == NULL) {
		printf("%s: failed to allocate memory\n", __func__);
		taskqueue_free(ixl_iw.tq);
		ixl_iw.registered = false;
		return (ENOMEM);
	}

	ixl_iw.ops->init = ops->init;
	ixl_iw.ops->stop = ops->stop;

	mtx_lock(&ixl_iw.mtx);
	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->state.pf == IXL_IW_PF_STATE_ON) {
			pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON;
			taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
		}
out:
	mtx_unlock(&ixl_iw.mtx);

	return (err);
}

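/*
 * Unregister the iwarp driver: schedule iwarp shutdown on every PF it is
 * running on, drain the taskqueue and release the stored callbacks.
 */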
int
ixl_iw_unregister(void)
{
	struct ixl_iw_pf_entry *pf_entry;
	int iwarp_cap_on_pfs = 0;

	INIT_DEBUGOUT("begin");

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		iwarp_cap_on_pfs += pf_entry->pf->hw.func_caps.iwarp;
	if (!iwarp_cap_on_pfs && ixl_enable_iwarp) {
		printf("%s: attempt to unregister driver when no iwarp-capable device present\n",
		    __func__);
		return (ENODEV);
	}

	if (ixl_enable_iwarp == 0) {
		printf("%s: attempt to unregister driver when enable_iwarp is off\n",
		    __func__);
		return (ENODEV);
	}
	mtx_lock(&ixl_iw.mtx);

	if (!ixl_iw.registered) {
		printf("%s: failed - iwarp driver has not been registered\n",
		    __func__);
		mtx_unlock(&ixl_iw.mtx);
		return (ENOENT);
	}

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) {
			pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
			taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
		}

	ixl_iw.registered = false;

	mtx_unlock(&ixl_iw.mtx);

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		taskqueue_drain(ixl_iw.tq, &pf_entry->iw_task);
	taskqueue_free(ixl_iw.tq);
	ixl_iw.tq = NULL;
	free(ixl_iw.ops, M_DEVBUF);
	ixl_iw.ops = NULL;

	return (0);
}

#endif /* IXL_IW */
