/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/10/sys/dev/ixl/ixlvc.c 292100 2015-12-11 13:08:38Z smh $*/

/*
**	Virtual Channel support
**		These are support functions for communication
**		between the VF and PF drivers.
*/

#include "ixl.h"
#include "ixlv.h"
#include "i40e_prototype.h"


/* busy wait delay in msec */
#define IXLV_BUSY_WAIT_DELAY 10
#define IXLV_BUSY_WAIT_COUNT 50
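/* Combined, these bound a busy wait at 50 polls x 10 ms = 500 ms. */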

static void	ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t,
		    enum i40e_status_code);
static void	ixl_vc_process_next(struct ixl_vc_mgr *mgr);
static void	ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr);
static void	ixl_vc_send_current(struct ixl_vc_mgr *mgr);

#ifdef IXL_DEBUG
/*
** Validate VF messages
*/
static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
    u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;

	/* Validate message length. */
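	/*
	 * Fixed-size opcodes must match their structure size exactly;
	 * variable-length opcodes are checked as a base structure plus
	 * one array element per reported count (a zero count is invalid).
	 */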
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct
					     i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
			    (struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
			    (struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
			    (struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return EPERM;
	}
	/* A few more checks */
	if ((valid_len != msglen) || (err_msg_format))
		return EINVAL;
	else
		return 0;
}
#endif

/*
** ixlv_send_pf_msg
**
** Send a message to the PF and log an error on failure.
*/
static int
ixlv_send_pf_msg(struct ixlv_sc *sc,
	enum i40e_virtchnl_ops op, u8 *msg, u16 len)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	i40e_status	err;

#ifdef IXL_DEBUG
	/*
	** Pre-validate messages to the PF
	*/
	int val_err;
	val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
	if (val_err)
		device_printf(dev, "Error validating msg to PF for op %d,"
		    " msglen %d: error %d\n", op, len, val_err);
#endif

	err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
	if (err)
		device_printf(dev, "Unable to send opcode %d to PF, "
		    "error %d, aq status %d\n", op, err, hw->aq.asq_last_status);
	return err;
}


/*
** ixlv_send_api_ver
**
** Send API version admin queue message to the PF. The reply is not checked
** in this function. Returns 0 if the message was successfully
** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
*/
int
ixlv_send_api_ver(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_version_info vvi;

	vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
	vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;

	return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
	    (u8 *)&vvi, sizeof(vvi));
}

/*
** ixlv_verify_api_ver
**
** Compare API versions with the PF. Must be called after admin queue is
** initialized. Returns 0 if API versions match, EIO if
** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
*/
int
ixlv_verify_api_ver(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_version_info *pf_vvi;
	struct i40e_hw *hw = &sc->hw;
	struct i40e_arq_event_info event;
	i40e_status err;
	int retries = 0;

	event.buf_len = IXL_AQ_BUFSZ;
	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
	if (!event.msg_buf) {
		err = ENOMEM;
		goto out;
	}

	do {
		if (++retries > IXLV_AQ_MAX_ERR)
			goto out_alloc;

		/* NOTE: initial delay is necessary */
		i40e_msec_delay(100);
		err = i40e_clean_arq_element(hw, &event, NULL);
	} while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK);
	if (err)
		goto out_alloc;

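	/* The PF echoes the opcode in cookie_high and its status in cookie_low. */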
	err = (i40e_status)le32toh(event.desc.cookie_low);
	if (err) {
		err = EIO;
		goto out_alloc;
	}

	if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
	    I40E_VIRTCHNL_OP_VERSION) {
		DDPRINTF(sc->dev, "Received unexpected op response: %d\n",
		    le32toh(event.desc.cookie_high));
		err = EIO;
		goto out_alloc;
	}

	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
	if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
	    ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
	    (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
		err = EIO;
	else
		sc->pf_version = pf_vvi->minor;

out_alloc:
	free(event.msg_buf, M_DEVBUF);
out:
	return err;
}

/*
** ixlv_send_vf_config_msg
**
** Send VF configuration request admin queue message to the PF. The reply
** is not checked in this function. Returns 0 if the message was
** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
*/
int
ixlv_send_vf_config_msg(struct ixlv_sc *sc)
{
	u32	caps;

	caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
	    I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
	    I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
	    I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

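	/* A PF that reported API minor version 0 predates capability
	 * negotiation, so send the request without a payload in that case. */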
	if (sc->pf_version)
		return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				  (u8 *)&caps, sizeof(caps));
	else
		return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				  NULL, 0);
}

/*
** ixlv_get_vf_config
**
** Get VF configuration from PF and populate hw structure. Must be called after
** admin queue is initialized. Busy-waits until a response is received from the
** PF, up to a maximum number of retries. The response from the PF is left in
** the buffer for further processing by the caller.
*/
int
ixlv_get_vf_config(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	struct i40e_arq_event_info event;
	u16 len;
	i40e_status err = 0;
	u32 retries = 0;

	/* Note this assumes a single VSI */
	len = sizeof(struct i40e_virtchnl_vf_resource) +
	    sizeof(struct i40e_virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
	if (!event.msg_buf) {
		err = ENOMEM;
		goto out;
	}

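	/* Poll the admin receive queue until the resource reply arrives,
	 * tolerating both an empty queue and unrelated events, up to
	 * IXLV_AQ_MAX_ERR retries. */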
	for (;;) {
		err = i40e_clean_arq_element(hw, &event, NULL);
		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			if (++retries <= IXLV_AQ_MAX_ERR)
				i40e_msec_delay(10);
		} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
		    I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
			DDPRINTF(dev, "Received a response from PF,"
			    " opcode %d, error %d",
			    le32toh(event.desc.cookie_high),
			    le32toh(event.desc.cookie_low));
			retries++;
			continue;
		} else {
			err = (i40e_status)le32toh(event.desc.cookie_low);
			if (err) {
				device_printf(dev, "%s: Error returned from PF,"
				    " opcode %d, error %d\n", __func__,
				    le32toh(event.desc.cookie_high),
				    le32toh(event.desc.cookie_low));
				err = EIO;
				goto out_alloc;
			}
			/* We retrieved the config message, with no errors */
			break;
		}

		if (retries > IXLV_AQ_MAX_ERR) {
			INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
			    retries);
			err = ETIMEDOUT;
			goto out_alloc;
		}
	}

	memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
	i40e_vf_parse_hw_config(hw, sc->vf_res);

out_alloc:
	free(event.msg_buf, M_DEVBUF);
out:
	return err;
}

/*
** ixlv_configure_queues
**
** Request that the PF set up our queues.
*/
void
ixlv_configure_queues(struct ixlv_sc *sc)
{
	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int			len, pairs;

	struct i40e_virtchnl_vsi_queue_config_info *vqci;
	struct i40e_virtchnl_queue_pair_info *vqpi;

	pairs = vsi->num_queues;
	len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
	    (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
	vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!vqci) {
		device_printf(dev, "%s: unable to allocate memory\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}
	vqci->vsi_id = sc->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (int i = 0; i < pairs; i++, que++, vqpi++) {
		txr = &que->txr;
		rxr = &que->rxr;
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = que->num_desc;
		vqpi->txq.dma_ring_addr = txr->dma.pa;
		/* Enable Head writeback */
		vqpi->txq.headwb_enabled = 1;
		vqpi->txq.dma_headwb_addr = txr->dma.pa +
		    (que->num_desc * sizeof(struct i40e_tx_desc));
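		/* Note: the writeback area sits just past the end of the
		 * descriptor ring; the ring allocation is assumed to have
		 * reserved room for it. */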

		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = que->num_desc;
		vqpi->rxq.dma_ring_addr = rxr->dma.pa;
		vqpi->rxq.max_pkt_size = vsi->max_frame_size;
		vqpi->rxq.databuffer_size = rxr->mbuf_sz;
		vqpi->rxq.splithdr_enabled = 0;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			   (u8 *)vqci, len);
	free(vqci, M_DEVBUF);
}

/*
** ixlv_enable_queues
**
** Request that the PF enable all of our queues.
*/
void
ixlv_enable_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;

	vqs.vsi_id = sc->vsi_res->vsi_id;
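	/* (1 << n) - 1 sets the low n bits, selecting every queue pair. */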
	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
	vqs.rx_queues = vqs.tx_queues;
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
}

/*
** ixlv_disable_queues
**
** Request that the PF disable all of our queues.
*/
void
ixlv_disable_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;

	vqs.vsi_id = sc->vsi_res->vsi_id;
	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
	vqs.rx_queues = vqs.tx_queues;
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
}

/*
** ixlv_map_queues
**
** Request that the PF map queues to interrupt vectors. Misc causes, including
** the admin queue, are always mapped to vector 0.
*/
void
ixlv_map_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_irq_map_info *vm;
	int			i, q, len;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	/* How many queue vectors? The adminq uses one. */
	q = sc->msix - 1;

	len = sizeof(struct i40e_virtchnl_irq_map_info) +
	      (sc->msix * sizeof(struct i40e_virtchnl_vector_map));
	vm = malloc(len, M_DEVBUF, M_NOWAIT);
	if (!vm) {
		printf("%s: unable to allocate memory\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	vm->num_vectors = sc->msix;
	/* Queue vectors first */
	for (i = 0; i < q; i++, que++) {
		vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
		vm->vecmap[i].vector_id = i + 1; /* first is adminq */
		vm->vecmap[i].txq_map = (1 << que->me);
		vm->vecmap[i].rxq_map = (1 << que->me);
		vm->vecmap[i].rxitr_idx = 0;
		vm->vecmap[i].txitr_idx = 0;
	}

	/* Misc vector last - this is only for AdminQ messages */
	vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
	vm->vecmap[i].vector_id = 0;
	vm->vecmap[i].txq_map = 0;
	vm->vecmap[i].rxq_map = 0;
	vm->vecmap[i].rxitr_idx = 0;
	vm->vecmap[i].txitr_idx = 0;

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
	    (u8 *)vm, len);
	free(vm, M_DEVBUF);
}

/*
** Scan the Filter List looking for vlans that need
** to be added, then create the data to hand to the AQ
** for handling.
*/
void
ixlv_add_vlans(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_vlan_filter_list	*v;
	struct ixlv_vlan_filter *f, *ftmp;
	device_t	dev = sc->dev;
	int		len, i = 0, cnt = 0;

	/* Get count of VLAN filters to add */
	SLIST_FOREACH(f, sc->vlan_filters, next) {
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	}

	if (!cnt) {  /* no work... */
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	      (cnt * sizeof(u16));

	if (len > IXL_AQ_BUF_SZ) {
		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
			__func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v = malloc(len, M_DEVBUF, M_NOWAIT);
	if (!v) {
		device_printf(dev, "%s: unable to allocate memory\n",
			__func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v->vsi_id = sc->vsi_res->vsi_id;
	v->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
		if (f->flags & IXL_FILTER_ADD) {
			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
			f->flags = IXL_FILTER_USED;
			i++;
		}
		if (i == cnt)
			break;
	}
	// ERJ: Should this be taken out?
	if (i == 0) { /* Should not happen... */
		device_printf(dev, "%s: i == 0?\n", __func__);
		/* Don't leak the AQ buffer on this early return */
		free(v, M_DEVBUF);
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
		    I40E_SUCCESS);
		return;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
	free(v, M_DEVBUF);
	/* add stats? */
}

/*
** Scan the Filter Table looking for vlans that need
** to be removed, then create the data to hand to the AQ
** for handling.
*/
void
ixlv_del_vlans(struct ixlv_sc *sc)
{
	device_t	dev = sc->dev;
	struct i40e_virtchnl_vlan_filter_list *v;
	struct ixlv_vlan_filter *f, *ftmp;
	int len, i = 0, cnt = 0;

	/* Get count of VLAN filters to delete */
	SLIST_FOREACH(f, sc->vlan_filters, next) {
		if (f->flags & IXL_FILTER_DEL)
			cnt++;
	}

	if (!cnt) {  /* no work... */
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	      (cnt * sizeof(u16));

	if (len > IXL_AQ_BUF_SZ) {
		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
			__func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!v) {
		device_printf(dev, "%s: unable to allocate memory\n",
			__func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v->vsi_id = sc->vsi_res->vsi_id;
	v->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
		if (f->flags & IXL_FILTER_DEL) {
			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
			i++;
			SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next);
			free(f, M_DEVBUF);
		}
		if (i == cnt)
			break;
	}
	// ERJ: Take this out?
	if (i == 0) { /* Should not happen... */
		device_printf(dev, "%s: i == 0?\n", __func__);
		/* Don't leak the AQ buffer on this early return */
		free(v, M_DEVBUF);
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
		    I40E_SUCCESS);
		return;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
	free(v, M_DEVBUF);
	/* add stats? */
}


/*
** This routine takes additions to the VSI filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixlv_add_ether_filters(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_ether_addr_list *a;
	struct ixlv_mac_filter	*f;
	device_t			dev = sc->dev;
	int				len, j = 0, cnt = 0;

	/* Get count of MAC addresses to add */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	}
	if (cnt == 0) { /* Should not happen... */
		DDPRINTF(dev, "cnt == 0, exiting...");
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	    (cnt * sizeof(struct i40e_virtchnl_ether_addr));

	a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "%s: Failed to get memory for "
		    "virtchnl_ether_addr_list\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}
	a->vsi_id = sc->vsi.id;
	a->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_ADD) {
			bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
			f->flags &= ~IXL_FILTER_ADD;
			j++;

			DDPRINTF(dev, "ADD: " MAC_FORMAT,
			    MAC_FORMAT_ARGS(f->macaddr));
		}
		if (j == cnt)
			break;
	}
	DDPRINTF(dev, "len %d, j %d, cnt %d",
	    len, j, cnt);
	ixlv_send_pf_msg(sc,
	    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
	/* add stats? */
	free(a, M_DEVBUF);
}

/*
** This routine takes filters flagged for deletion in the
** sc MAC filter list and creates an Admin Queue call
** to delete those filters in the hardware.
*/
void
ixlv_del_ether_filters(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_ether_addr_list *d;
	device_t			dev = sc->dev;
	struct ixlv_mac_filter	*f, *f_temp;
	int				len, j = 0, cnt = 0;

	/* Get count of MAC addresses to delete */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_DEL)
			cnt++;
	}
	if (cnt == 0) {
		DDPRINTF(dev, "cnt == 0, exiting...");
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	    (cnt * sizeof(struct i40e_virtchnl_ether_addr));

	d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: Failed to get memory for "
		    "virtchnl_ether_addr_list\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}
	d->vsi_id = sc->vsi.id;
	d->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
		if (f->flags & IXL_FILTER_DEL) {
			bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
			DDPRINTF(dev, "DEL: " MAC_FORMAT,
			    MAC_FORMAT_ARGS(f->macaddr));
			j++;
			SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next);
			free(f, M_DEVBUF);
		}
		if (j == cnt)
			break;
	}
	ixlv_send_pf_msg(sc,
	    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
	/* add stats? */
	free(d, M_DEVBUF);
}

/*
** ixlv_request_reset
** Request that the PF reset this VF. No response is expected.
*/
void
ixlv_request_reset(struct ixlv_sc *sc)
{
	/*
	** Set the reset status to "in progress" before
	** the request; this avoids any possibility of
	** a mistaken early detection of completion.
	*/
	wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
}

/*
** ixlv_request_stats
** Request the statistics for this VF's VSI from the PF.
*/
void
ixlv_request_stats(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;
	int error = 0;

	vqs.vsi_id = sc->vsi_res->vsi_id;
	/* Low priority, so we don't need to error-check */
	error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
	    (u8 *)&vqs, sizeof(vqs));
#ifdef IXL_DEBUG
	if (error)
		device_printf(sc->dev, "Error sending stats request to PF: %d\n", error);
#endif
}

/*
** Updates driver's stats counters with VSI stats returned from PF.
*/
void
ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
{
	struct ixl_vsi *vsi = &sc->vsi;
	uint64_t tx_discards;

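	/* Combine the PF-reported discards with software drops counted
	 * in each queue's buf_ring. */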
	tx_discards = es->tx_discards;
	for (int i = 0; i < vsi->num_queues; i++)
		tx_discards += sc->vsi.queues[i].txr.br->br_drops;

	/* Update ifnet stats */
	IXL_SET_IPACKETS(vsi, es->rx_unicast +
	    es->rx_multicast +
	    es->rx_broadcast);
	IXL_SET_OPACKETS(vsi, es->tx_unicast +
	    es->tx_multicast +
	    es->tx_broadcast);
	IXL_SET_IBYTES(vsi, es->rx_bytes);
	IXL_SET_OBYTES(vsi, es->tx_bytes);
	IXL_SET_IMCASTS(vsi, es->rx_multicast);
	IXL_SET_OMCASTS(vsi, es->tx_multicast);

	IXL_SET_OERRORS(vsi, es->tx_errors);
	IXL_SET_IQDROPS(vsi, es->rx_discards);
	IXL_SET_OQDROPS(vsi, tx_discards);
	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
	IXL_SET_COLLISIONS(vsi, 0);

	vsi->eth_stats = *es;
}

/*
** ixlv_vc_completion
**
** Asynchronous completion function for admin queue messages. Rather than busy
** wait, we fire off our requests and assume that no errors will be returned.
** This function handles the reply messages.
*/
void
ixlv_vc_completion(struct ixlv_sc *sc,
    enum i40e_virtchnl_ops v_opcode,
    i40e_status v_retval, u8 *msg, u16 msglen)
{
	device_t	dev = sc->dev;
	struct ixl_vsi	*vsi = &sc->vsi;

	if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
		struct i40e_virtchnl_pf_event *vpe =
			(struct i40e_virtchnl_pf_event *)msg;

		switch (vpe->event) {
		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
#ifdef IXL_DEBUG
			device_printf(dev, "Link change: status %d, speed %d\n",
			    vpe->event_data.link_event.link_status,
			    vpe->event_data.link_event.link_speed);
#endif
			sc->link_up =
				vpe->event_data.link_event.link_status;
			sc->link_speed =
				vpe->event_data.link_event.link_speed;
			ixlv_update_link_status(sc);
			break;
		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
			device_printf(dev, "PF initiated reset!\n");
			sc->init_state = IXLV_RESET_PENDING;
			ixlv_init(sc);
			break;
		default:
			device_printf(dev, "%s: Unknown event %d from AQ\n",
				__func__, vpe->event);
			break;
		}

		return;
	}

	/* Catch-all error response */
	if (v_retval) {
		device_printf(dev,
		    "%s: AQ returned error %d to our request %d!\n",
		    __func__, v_retval, v_opcode);
	}

#ifdef IXL_DEBUG
	if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
		DDPRINTF(dev, "opcode %d", v_opcode);
#endif

	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_GET_STATS:
		ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
		    v_retval);
		if (v_retval) {
			device_printf(dev, "WARNING: Error adding VF mac filter!\n");
			device_printf(dev, "WARNING: Device may not receive traffic!\n");
		}
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES,
		    v_retval);
		if (v_retval == 0) {
			/* Update link status */
			ixlv_update_link_status(sc);
			/* Turn on all interrupts */
			ixlv_enable_intr(vsi);
			/* And inform the stack we're ready */
			vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
			vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES,
		    v_retval);
		if (v_retval == 0) {
			/* Turn off all interrupts */
			ixlv_disable_intr(vsi);
			/* Tell the stack that the interface is no longer active */
			vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
		    v_retval);
		break;
	default:
		device_printf(dev,
		    "%s: Received unexpected message %d from PF.\n",
		    __func__, v_opcode);
		break;
	}
}

static void
ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
{

	switch (request) {
	case IXLV_FLAG_AQ_MAP_VECTORS:
		ixlv_map_queues(sc);
		break;

	case IXLV_FLAG_AQ_ADD_MAC_FILTER:
		ixlv_add_ether_filters(sc);
		break;

	case IXLV_FLAG_AQ_ADD_VLAN_FILTER:
		ixlv_add_vlans(sc);
		break;

	case IXLV_FLAG_AQ_DEL_MAC_FILTER:
		ixlv_del_ether_filters(sc);
		break;

	case IXLV_FLAG_AQ_DEL_VLAN_FILTER:
		ixlv_del_vlans(sc);
		break;

	case IXLV_FLAG_AQ_CONFIGURE_QUEUES:
		ixlv_configure_queues(sc);
		break;

	case IXLV_FLAG_AQ_DISABLE_QUEUES:
		ixlv_disable_queues(sc);
		break;

	case IXLV_FLAG_AQ_ENABLE_QUEUES:
		ixlv_enable_queues(sc);
		break;
	}
}

void
ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr)
{
	mgr->sc = sc;
	mgr->current = NULL;
	TAILQ_INIT(&mgr->pending);
	callout_init_mtx(&mgr->callout, &sc->mtx, 0);
}

static void
ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err)
{
	struct ixl_vc_cmd *cmd;

	cmd = mgr->current;
	mgr->current = NULL;
	cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;

	cmd->callback(cmd, cmd->arg, err);
	ixl_vc_process_next(mgr);
}

static void
ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
    enum i40e_status_code err)
{
	struct ixl_vc_cmd *cmd;

	cmd = mgr->current;
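	/* Ignore responses that don't match the command currently in flight. */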
	if (cmd == NULL || cmd->request != request)
		return;

	callout_stop(&mgr->callout);
	ixl_vc_process_completion(mgr, err);
}

static void
ixl_vc_cmd_timeout(void *arg)
{
	struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT);
}

static void
ixl_vc_cmd_retry(void *arg)
{
	struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	ixl_vc_send_current(mgr);
}

static void
ixl_vc_send_current(struct ixl_vc_mgr *mgr)
{
	struct ixl_vc_cmd *cmd;

	cmd = mgr->current;
	ixl_vc_send_cmd(mgr->sc, cmd->request);
	callout_reset(&mgr->callout, IXLV_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr);
}

static void
ixl_vc_process_next(struct ixl_vc_mgr *mgr)
{
	struct ixl_vc_cmd *cmd;

	if (mgr->current != NULL)
		return;

	if (TAILQ_EMPTY(&mgr->pending))
		return;

	cmd = TAILQ_FIRST(&mgr->pending);
	TAILQ_REMOVE(&mgr->pending, cmd, next);

	mgr->current = cmd;
	ixl_vc_send_current(mgr);
}

static void
ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr)
{

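	/* howmany(hz, 100) is ~10 ms worth of ticks. */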
	callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr);
}

void
ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
	    uint32_t req, ixl_vc_callback_t *callback, void *arg)
{
	IXLV_CORE_LOCK_ASSERT(mgr->sc);

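	/* If this command is already queued or in flight, unlink it first
	 * so it appears on the pending list only once. */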
	if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
		if (mgr->current == cmd)
			mgr->current = NULL;
		else
			TAILQ_REMOVE(&mgr->pending, cmd, next);
	}

	cmd->request = req;
	cmd->callback = callback;
	cmd->arg = arg;
	cmd->flags |= IXLV_VC_CMD_FLAG_BUSY;
	TAILQ_INSERT_TAIL(&mgr->pending, cmd, next);

	ixl_vc_process_next(mgr);
}

void
ixl_vc_flush(struct ixl_vc_mgr *mgr)
{
	struct ixl_vc_cmd *cmd;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL,
	    ("ixlv: pending commands waiting but no command in progress"));

	cmd = mgr->current;
	if (cmd != NULL) {
		mgr->current = NULL;
		cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
		cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
	}

	while ((cmd = TAILQ_FIRST(&mgr->pending)) != NULL) {
		TAILQ_REMOVE(&mgr->pending, cmd, next);
		cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
		cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
	}

	callout_stop(&mgr->callout);
}
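
/*
** Usage sketch (not part of the driver): a caller holding the core lock
** enqueues a command with a completion callback. The callback name and
** the softc command field below are hypothetical.
**
**	static void
**	my_enable_done(struct ixl_vc_cmd *cmd, void *arg,
**	    enum i40e_status_code code)
**	{
**		if (code != I40E_SUCCESS)
**			;	// handle timeout or AQ error here
**	}
**
**	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
**	    IXLV_FLAG_AQ_ENABLE_QUEUES, my_enable_done, sc);
**
** The manager sends one command at a time, arms a timeout, and completes
** the command when ixlv_vc_completion() sees the matching PF response.
*/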
1134