/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/10/sys/dev/ixl/ixlvc.c 270631 2014-08-25 22:04:29Z jfv $*/

/*
**	Virtual Channel support
**		These are support functions for communication
**		between the VF and PF drivers.
*/

#include "ixl.h"
#include "ixlv.h"
#include "i40e_prototype.h"


/* busy wait delay in msec */
#define IXLV_BUSY_WAIT_DELAY 10
#define IXLV_BUSY_WAIT_COUNT 50
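
/*
** At worst, a busy-wait loop built on these values spins for
** IXLV_BUSY_WAIT_COUNT * IXLV_BUSY_WAIT_DELAY = 50 * 10 msec,
** roughly half a second, before giving up.
*/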

/*
** Validate VF messages
*/
static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
    u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;

	/* Validate message length. */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct
					     i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
			    (struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
			    (struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
			    (struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return EPERM;
	}
	/* A few more checks. */
	if ((valid_len != msglen) || (err_msg_format))
		return EINVAL;
	else
		return 0;
}
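
/*
** Example of the variable-length sizing rule above: a
** CONFIG_VSI_QUEUES message carrying n queue pairs is only valid if
**
**	msglen == sizeof(struct i40e_virtchnl_vsi_queue_config_info)
**	    + n * sizeof(struct i40e_virtchnl_queue_pair_info)
**
** with n > 0; any other length (or n == 0) is rejected with EINVAL.
*/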

/*
** ixlv_send_pf_msg
**
** Send message to PF and print status on failure.
*/
static int
ixlv_send_pf_msg(struct ixlv_sc *sc,
	enum i40e_virtchnl_ops op, u8 *msg, u16 len)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	i40e_status	err;
	int		val_err;

	/*
	** Pre-validate messages to the PF; this check might be
	** removed later for performance.
	*/
	val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
	if (val_err)
		device_printf(dev, "Error validating msg to PF for op %d,"
		    " msglen %d: error %d\n", op, len, val_err);

	err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
	if (err)
		device_printf(dev, "Unable to send opcode %d to PF, "
		    "error %d, aq status %d\n", op, err, hw->aq.asq_last_status);
	return err;
}


/*
** ixlv_send_api_ver
**
** Send API version admin queue message to the PF. The reply is not checked
** in this function. Returns 0 if the message was successfully
** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
*/
int
ixlv_send_api_ver(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_version_info vvi;

	vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
	vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;

	return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
	    (u8 *)&vvi, sizeof(vvi));
}

/*
** ixlv_verify_api_ver
**
** Compare API versions with the PF. Must be called after admin queue is
** initialized. Returns 0 if API versions match, EIO if
** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
*/
int ixlv_verify_api_ver(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_version_info *pf_vvi;
	struct i40e_hw *hw = &sc->hw;
	struct i40e_arq_event_info event;
	i40e_status err;
	int retries = 0;

	event.buf_len = IXL_AQ_BUFSZ;
	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
	if (!event.msg_buf) {
		err = ENOMEM;
		goto out;
	}

	do {
		if (++retries > IXLV_AQ_MAX_ERR)
			goto out_alloc;

		/* NOTE: initial delay is necessary */
		i40e_msec_delay(100);
		err = i40e_clean_arq_element(hw, &event, NULL);
	} while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK);
	if (err)
		goto out_alloc;

	err = (i40e_status)le32toh(event.desc.cookie_low);
	if (err) {
		err = EIO;
		goto out_alloc;
	}

	if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
	    I40E_VIRTCHNL_OP_VERSION) {
		err = EIO;
		goto out_alloc;
	}

	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
	if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
	    (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
		err = EIO;

out_alloc:
	free(event.msg_buf, M_DEVBUF);
out:
	return err;
}
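
/*
** As used above, replies from the PF follow a simple convention:
** the AQ descriptor's cookie_high holds the virtchnl opcode being
** answered and cookie_low holds its return status. A successful
** version reply thus has cookie_high == I40E_VIRTCHNL_OP_VERSION,
** cookie_low == 0, and the version payload in event.msg_buf.
*/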

/*
** ixlv_send_vf_config_msg
**
** Send VF configuration request admin queue message to the PF. The reply
** is not checked in this function. Returns 0 if the message was
** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
*/
int
ixlv_send_vf_config_msg(struct ixlv_sc *sc)
{
	return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				  NULL, 0);
}

/*
** ixlv_get_vf_config
**
** Get VF configuration from PF and populate hw structure. Must be called after
** admin queue is initialized. Busy waits until response is received from PF,
** with maximum timeout. Response from PF is returned in the buffer for further
** processing by the caller.
*/
int
ixlv_get_vf_config(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	struct i40e_arq_event_info event;
	u16 len;
	i40e_status err = 0;
	u32 retries = 0;

	/* Note this assumes a single VSI */
	len = sizeof(struct i40e_virtchnl_vf_resource) +
	    sizeof(struct i40e_virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
	if (!event.msg_buf) {
		err = ENOMEM;
		goto out;
	}

	do {
		err = i40e_clean_arq_element(hw, &event, NULL);
		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			if (++retries <= IXLV_AQ_MAX_ERR)
				i40e_msec_delay(100);
		} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
		    I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
			device_printf(dev, "%s: Received a response from PF,"
			    " opcode %d, error %d\n", __func__,
			    le32toh(event.desc.cookie_high),
			    le32toh(event.desc.cookie_low));
			retries++;
			continue;
		} else {
			err = (i40e_status)le32toh(event.desc.cookie_low);
			if (err) {
				device_printf(dev, "%s: Error returned from PF,"
				    " opcode %d, error %d\n", __func__,
				    le32toh(event.desc.cookie_high),
				    le32toh(event.desc.cookie_low));
				err = EIO;
				goto out_alloc;
			}
			break;
		}

		if (retries > IXLV_AQ_MAX_ERR) {
			INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
			    retries);
			goto out_alloc;
		}

	} while (err);

	memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
	i40e_vf_parse_hw_config(hw, sc->vf_res);

out_alloc:
	free(event.msg_buf, M_DEVBUF);
out:
	return err;
}

/*
** ixlv_configure_queues
**
** Request that the PF set up our queues.
*/
void
ixlv_configure_queues(struct ixlv_sc *sc)
{
	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int			len, pairs;

	struct i40e_virtchnl_vsi_queue_config_info *vqci;
	struct i40e_virtchnl_queue_pair_info *vqpi;

	if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
#ifdef IXL_DEBUG
		device_printf(dev, "%s: command %d pending\n",
			__func__, sc->current_op);
#endif
		return;
	}

	pairs = vsi->num_queues;
	sc->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
		       (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
	vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!vqci) {
		device_printf(dev, "%s: unable to allocate memory\n", __func__);
		return;
	}
	vqci->vsi_id = sc->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (int i = 0; i < pairs; i++, que++) {
		txr = &que->txr;
		rxr = &que->rxr;
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = que->num_desc;
		vqpi->txq.dma_ring_addr = txr->dma.pa;
		/* Enable Head writeback */
		vqpi->txq.headwb_enabled = 1;
		vqpi->txq.dma_headwb_addr = txr->dma.pa +
		    (que->num_desc * sizeof(struct i40e_tx_desc));

		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = que->num_desc;
		vqpi->rxq.dma_ring_addr = rxr->dma.pa;
		vqpi->rxq.max_pkt_size = vsi->max_frame_size;
		vqpi->rxq.databuffer_size = rxr->mbuf_sz;
		vqpi++;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			   (u8 *)vqci, len);
	free(vqci, M_DEVBUF);
	sc->aq_pending |= IXLV_FLAG_AQ_CONFIGURE_QUEUES;
	sc->aq_required &= ~IXLV_FLAG_AQ_CONFIGURE_QUEUES;
}
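
/*
** Illustrative DMA layout per TX queue with head writeback enabled,
** following the address math above:
**
**	txr->dma.pa                                           descriptor ring
**	txr->dma.pa + num_desc * sizeof(struct i40e_tx_desc)  head writeback
**
** The hardware writes the completed-descriptor head index into that
** trailing area, so the driver can poll host memory rather than a
** device register to observe TX progress.
*/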

/*
** ixlv_enable_queues
**
** Request that the PF enable all of our queues.
*/
void
ixlv_enable_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;

	if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* we already have a command pending */
#ifdef IXL_DEBUG
		device_printf(sc->dev, "%s: command %d pending\n",
			__func__, sc->current_op);
#endif
		return;
	}
	sc->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
	vqs.vsi_id = sc->vsi_res->vsi_id;
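	/* One bit per queue pair: e.g. four pairs yield a mask of 0xf */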
	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
	vqs.rx_queues = vqs.tx_queues;
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
	sc->aq_pending |= IXLV_FLAG_AQ_ENABLE_QUEUES;
	sc->aq_required &= ~IXLV_FLAG_AQ_ENABLE_QUEUES;
}

/*
** ixlv_disable_queues
**
** Request that the PF disable all of our queues.
*/
void
ixlv_disable_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;

	if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* we already have a command pending */
#ifdef IXL_DEBUG
		device_printf(sc->dev, "%s: command %d pending\n",
			__func__, sc->current_op);
#endif
		return;
	}
	sc->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
	vqs.vsi_id = sc->vsi_res->vsi_id;
	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
	vqs.rx_queues = vqs.tx_queues;
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
	sc->aq_pending |= IXLV_FLAG_AQ_DISABLE_QUEUES;
	sc->aq_required &= ~IXLV_FLAG_AQ_DISABLE_QUEUES;
}

/*
** ixlv_map_queues
**
** Request that the PF map queues to interrupt vectors. Misc causes, including
** admin queue, are always mapped to vector 0.
*/
void
ixlv_map_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_irq_map_info *vm;
	int			i, q, len;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* we already have a command pending */
#ifdef IXL_DEBUG
		device_printf(sc->dev, "%s: command %d pending\n",
			__func__, sc->current_op);
#endif
		return;
	}
	sc->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;

	/* How many queue vectors? The adminq uses one. */
	q = sc->msix - 1;

	len = sizeof(struct i40e_virtchnl_irq_map_info) +
	      (sc->msix * sizeof(struct i40e_virtchnl_vector_map));
	vm = malloc(len, M_DEVBUF, M_NOWAIT);
	if (!vm) {
		device_printf(sc->dev, "%s: unable to allocate memory\n",
		    __func__);
		return;
	}

	vm->num_vectors = sc->msix;
	/* Queue vectors first */
	for (i = 0; i < q; i++, que++) {
		vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
		vm->vecmap[i].vector_id = i + 1; /* first is adminq */
		vm->vecmap[i].txq_map = (1 << que->me);
		vm->vecmap[i].rxq_map = (1 << que->me);
	}

	/* Misc vector last - this is only for AdminQ messages */
	vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
	vm->vecmap[i].vector_id = 0;
	vm->vecmap[i].txq_map = 0;
	vm->vecmap[i].rxq_map = 0;

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
	    (u8 *)vm, len);
	free(vm, M_DEVBUF);
	sc->aq_pending |= IXLV_FLAG_AQ_MAP_VECTORS;
	sc->aq_required &= ~IXLV_FLAG_AQ_MAP_VECTORS;
}
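
/*
** Illustrative layout of the resulting message (assuming queue i
** has que->me == i): with sc->msix == 3, one adminq vector plus
** two queue vectors, it carries:
**
**	vecmap[0]: vector_id 1, txq_map/rxq_map 0x1 (queue 0)
**	vecmap[1]: vector_id 2, txq_map/rxq_map 0x2 (queue 1)
**	vecmap[2]: vector_id 0, txq_map/rxq_map 0   (adminq only)
*/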

/*
** Scan the filter list looking for VLANs that need
** to be added, then create the data to hand to the AQ
** for handling.
*/
void
ixlv_add_vlans(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_vlan_filter_list	*v;
	struct ixlv_vlan_filter *f, *ftmp;
	device_t	dev = sc->dev;
	int		len, i = 0, cnt = 0;

	if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
		return;

	sc->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;

	/* Get count of VLAN filters to add */
	SLIST_FOREACH(f, sc->vlan_filters, next) {
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	}

	if (!cnt) {  /* no work... */
		sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER;
		sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		return;
	}

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	      (cnt * sizeof(u16));

	if (len > IXL_AQ_BUF_SZ) {
		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
			__func__);
		return;
	}

	v = malloc(len, M_DEVBUF, M_NOWAIT);
	if (!v) {
		device_printf(dev, "%s: unable to allocate memory\n",
			__func__);
		return;
	}

	v->vsi_id = sc->vsi_res->vsi_id;
	v->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
		if (f->flags & IXL_FILTER_ADD) {
			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
			f->flags = IXL_FILTER_USED;
			i++;
		}
		if (i == cnt)
			break;
	}
	if (i == 0) { /* Should not happen... */
		device_printf(dev, "%s: i == 0?\n", __func__);
		free(v, M_DEVBUF);
		return;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
	free(v, M_DEVBUF);
	/* add stats? */
	sc->aq_pending |= IXLV_FLAG_AQ_ADD_VLAN_FILTER;
	sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER;
}
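
/*
** The flag bits drive these scans: a filter marked IXL_FILTER_ADD
** is copied into the message and downgraded to IXL_FILTER_USED,
** while one marked IXL_FILTER_DEL (handled below) is copied and
** then removed from the list entirely.
*/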

/*
** Scan the filter table looking for VLANs that need
** to be removed, then create the data to hand to the AQ
** for handling.
*/
void
ixlv_del_vlans(struct ixlv_sc *sc)
{
	device_t	dev = sc->dev;
	struct i40e_virtchnl_vlan_filter_list *v;
	struct ixlv_vlan_filter *f, *ftmp;
	int len, i = 0, cnt = 0;

	if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
		return;

	sc->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;

	/* Get count of VLAN filters to delete */
	SLIST_FOREACH(f, sc->vlan_filters, next) {
		if (f->flags & IXL_FILTER_DEL)
			cnt++;
	}

	if (!cnt) {  /* no work... */
		sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER;
		sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		return;
	}

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	      (cnt * sizeof(u16));

	if (len > IXL_AQ_BUF_SZ) {
		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
			__func__);
		return;
	}

	v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!v) {
		device_printf(dev, "%s: unable to allocate memory\n",
			__func__);
		return;
	}

	v->vsi_id = sc->vsi_res->vsi_id;
	v->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
		if (f->flags & IXL_FILTER_DEL) {
			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
			i++;
			SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next);
			free(f, M_DEVBUF);
		}
		if (i == cnt)
			break;
	}
	if (i == 0) { /* Should not happen... */
		device_printf(dev, "%s: i == 0?\n", __func__);
		free(v, M_DEVBUF);
		return;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
	free(v, M_DEVBUF);
	/* add stats? */
	sc->aq_pending |= IXLV_FLAG_AQ_DEL_VLAN_FILTER;
	sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER;
}


/*
** This routine takes additions to the VSI filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixlv_add_ether_filters(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_ether_addr_list *a;
	struct ixlv_mac_filter	*f;
	device_t			dev = sc->dev;
	int				len, j = 0, cnt = 0;

	if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
		return;

	sc->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;

	/* Get count of MAC addresses to add */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	}
	if (cnt == 0) { /* Should not happen... */
		DDPRINTF(dev, "cnt == 0, exiting...");
		sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER;
		wakeup(&sc->add_ether_done);
		return;
	}

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	    (cnt * sizeof(struct i40e_virtchnl_ether_addr));

	a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "%s: Failed to get memory for "
		    "virtchnl_ether_addr_list\n", __func__);
		return;
	}
	a->vsi_id = sc->vsi.id;
	a->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_ADD) {
			bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
			f->flags &= ~IXL_FILTER_ADD;
			j++;

			DDPRINTF(dev, "ADD: " MAC_FORMAT,
			    MAC_FORMAT_ARGS(f->macaddr));
		}
		if (j == cnt)
			break;
	}
	DDPRINTF(dev, "len %d, j %d, cnt %d",
	    len, j, cnt);
	ixlv_send_pf_msg(sc,
	    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
	/* add stats? */
	free(a, M_DEVBUF);
	sc->aq_pending |= IXLV_FLAG_AQ_ADD_MAC_FILTER;
	sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER;
	return;
}

/*
** This routine takes filters flagged for deletion in the
** sc MAC filter list and creates an Admin Queue call
** to delete those filters in the hardware.
*/
void
ixlv_del_ether_filters(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_ether_addr_list *d;
	device_t			dev = sc->dev;
	struct ixlv_mac_filter	*f, *f_temp;
	int				len, j = 0, cnt = 0;

	if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
		return;

	sc->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;

	/* Get count of MAC addresses to delete */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_DEL)
			cnt++;
	}
	if (cnt == 0) {
		DDPRINTF(dev, "cnt == 0, exiting...");
		sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER;
		sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		wakeup(&sc->del_ether_done);
		return;
	}

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	    (cnt * sizeof(struct i40e_virtchnl_ether_addr));

	d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: Failed to get memory for "
		    "virtchnl_ether_addr_list\n", __func__);
		return;
	}
	d->vsi_id = sc->vsi.id;
	d->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
		if (f->flags & IXL_FILTER_DEL) {
			bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
			DDPRINTF(dev, "DEL: " MAC_FORMAT,
			    MAC_FORMAT_ARGS(f->macaddr));
			j++;
			SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next);
			free(f, M_DEVBUF);
		}
		if (j == cnt)
			break;
	}
	ixlv_send_pf_msg(sc,
	    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
	/* add stats? */
	free(d, M_DEVBUF);
	sc->aq_pending |= IXLV_FLAG_AQ_DEL_MAC_FILTER;
	sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER;
	return;
}

/*
** ixlv_request_reset
** Request that the PF reset this VF. No response is expected.
*/
void
ixlv_request_reset(struct ixlv_sc *sc)
{
	/*
	** Set the reset status to "in progress" before
	** the request; this avoids any possibility of
	** a mistaken early detection of completion.
	*/
	wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
	sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}

/*
** ixlv_request_stats
** Request the statistics for this VF's VSI from the PF.
*/
void
ixlv_request_stats(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;
	int error = 0;

	if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
		return;

	sc->current_op = I40E_VIRTCHNL_OP_GET_STATS;
	vqs.vsi_id = sc->vsi_res->vsi_id;
	error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
	    (u8 *)&vqs, sizeof(vqs));
	/* Low priority, ok if it fails */
	if (error)
		sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}

/*
** Updates driver's stats counters with VSI stats returned from PF.
*/
void
ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
{
	struct ifnet *ifp = sc->vsi.ifp;

	ifp->if_ipackets = es->rx_unicast +
	    es->rx_multicast +
	    es->rx_broadcast;
	ifp->if_opackets = es->tx_unicast +
	    es->tx_multicast +
	    es->tx_broadcast;
	ifp->if_ibytes = es->rx_bytes;
	ifp->if_obytes = es->tx_bytes;
	ifp->if_imcasts = es->rx_multicast;
	ifp->if_omcasts = es->tx_multicast;

	ifp->if_oerrors = es->tx_errors;
	ifp->if_iqdrops = es->rx_discards;
	ifp->if_noproto = es->rx_unknown_protocol;

	sc->vsi.eth_stats = *es;
}

/*
** ixlv_vc_completion
**
** Asynchronous completion function for admin queue messages. Rather than busy
** wait, we fire off our requests and assume that no errors will be returned.
** This function handles the reply messages.
*/
void
ixlv_vc_completion(struct ixlv_sc *sc,
    enum i40e_virtchnl_ops v_opcode,
    i40e_status v_retval, u8 *msg, u16 msglen)
{
	device_t	dev = sc->dev;
	struct ixl_vsi	*vsi = &sc->vsi;

	if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
		struct i40e_virtchnl_pf_event *vpe =
			(struct i40e_virtchnl_pf_event *)msg;

		switch (vpe->event) {
		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
			vsi->link_up =
				vpe->event_data.link_event.link_status;
			vsi->link_speed =
				vpe->event_data.link_event.link_speed;
			break;
		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
			device_printf(dev, "PF initiated reset!\n");
			sc->init_state = IXLV_RESET_PENDING;
			ixlv_init(sc);
			break;
		default:
			device_printf(dev, "%s: Unknown event %d from AQ\n",
				__func__, vpe->event);
			break;
		}

		return;
	}

	if (v_opcode != sc->current_op
	    && sc->current_op != I40E_VIRTCHNL_OP_GET_STATS) {
		device_printf(dev, "%s: Pending op is %d, received %d.\n",
			__func__, sc->current_op, v_opcode);
		sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		return;
	}

	/* Catch-all error response */
	if (v_retval) {
		device_printf(dev,
		    "%s: AQ returned error %d to our request %d!\n",
		    __func__, v_retval, v_opcode);
	}

#ifdef IXL_DEBUG
	if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
		DDPRINTF(dev, "opcode %d", v_opcode);
#endif

	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_GET_STATS:
		ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_MAC_FILTER);
		if (v_retval) {
			device_printf(dev, "WARNING: Error adding VF mac filter!\n");
			device_printf(dev, "WARNING: Device may not receive traffic!\n");
		}
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_MAC_FILTER);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_PROMISC);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_VLAN_FILTER);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_VLAN_FILTER);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		sc->aq_pending &= ~(IXLV_FLAG_AQ_ENABLE_QUEUES);
		if (v_retval == 0) {
			/* Turn on all interrupts */
			ixlv_enable_intr(vsi);
			/* And inform the stack we're ready */
			vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
			vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		sc->aq_pending &= ~(IXLV_FLAG_AQ_DISABLE_QUEUES);
		if (v_retval == 0) {
			/* Turn off all interrupts */
			ixlv_disable_intr(vsi);
			/* Tell the stack that the interface is no longer active */
			vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_QUEUES);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		sc->aq_pending &= ~(IXLV_FLAG_AQ_MAP_VECTORS);
		break;
	default:
		device_printf(dev,
		    "%s: Received unexpected message %d from PF.\n",
		    __func__, v_opcode);
		break;
	}
	sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
	return;
}
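
/*
** Summary of the request/completion handshake used throughout this
** file: each request routine sets its IXLV_FLAG_AQ_* bit in
** sc->aq_pending and clears it in sc->aq_required once the message
** is sent; ixlv_vc_completion() clears the pending bit when the
** PF's reply arrives and resets sc->current_op so the next command
** can be issued.
*/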
977