/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

/*
**	Virtual Channel support
**		These are support functions for communication
**		between the VF and PF drivers.
*/

#include "ixl.h"
#include "iavf.h"

/* busy wait delay in msec */
#define IAVF_BUSY_WAIT_DELAY 10
#define IAVF_BUSY_WAIT_COUNT 50

/*
** iavf_send_pf_msg
**
** Send a message to the PF and print an error message on failure.
*/
static int
iavf_send_pf_msg(struct iavf_sc *sc,
	enum virtchnl_ops op, u8 *msg, u16 len)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	i40e_status status;
	int val_err;

	/* Validate the message before sending it to the PF */
	val_err = virtchnl_vc_validate_vf_msg(&sc->version, op, msg, len);
	if (val_err)
		device_printf(dev, "Error validating msg to PF for op %d,"
		    " msglen %d: error %d\n", op, len, val_err);

	if (!i40e_check_asq_alive(hw)) {
		if (op != VIRTCHNL_OP_GET_STATS)
			device_printf(dev, "Unable to send opcode %s to PF, "
			    "ASQ is not alive\n", ixl_vc_opcode_str(op));
		return (0);
	}

	if (op != VIRTCHNL_OP_GET_STATS)
		iavf_dbg_vc(sc,
		    "Sending msg (op=%s[%d]) to PF\n",
		    ixl_vc_opcode_str(op), op);

	status = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
	if (status && op != VIRTCHNL_OP_GET_STATS)
		device_printf(dev, "Unable to send opcode %s to PF, "
		    "status %s, aq error %s\n",
		    ixl_vc_opcode_str(op),
		    i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));

	return (status);
}

/*
** iavf_send_api_ver
**
** Send API version admin queue message to the PF. The reply is not checked
** in this function. Returns 0 if the message was successfully
** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
*/
int
iavf_send_api_ver(struct iavf_sc *sc)
{
	struct virtchnl_version_info vvi;

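	/* Advertise the highest virtchnl API version this VF driver supports */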
	vvi.major = VIRTCHNL_VERSION_MAJOR;
	vvi.minor = VIRTCHNL_VERSION_MINOR;

	return iavf_send_pf_msg(sc, VIRTCHNL_OP_VERSION,
	    (u8 *)&vvi, sizeof(vvi));
}

/*
** iavf_verify_api_ver
**
** Compare API versions with the PF. Must be called after the admin queue is
** initialized. Returns 0 if the API versions match, EIO if
** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
*/
int
iavf_verify_api_ver(struct iavf_sc *sc)
{
	struct virtchnl_version_info *pf_vvi;
	struct i40e_hw *hw = &sc->hw;
	struct i40e_arq_event_info event;
	device_t dev = sc->dev;
	i40e_status err;
	int retries = 0;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IAVF, M_WAITOK);

	for (;;) {
		if (++retries > IAVF_AQ_MAX_ERR)
			goto out_alloc;

		/* An initial delay is necessary to give the PF time to respond */
		i40e_msec_pause(100);
		err = i40e_clean_arq_element(hw, &event, NULL);
		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			continue;
		else if (err) {
			err = EIO;
			goto out_alloc;
		}

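		/* The PF echoes the request opcode in cookie_high and its status in cookie_low */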
		if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) !=
		    VIRTCHNL_OP_VERSION) {
			DDPRINTF(dev, "Received unexpected op response: %d\n",
			    le32toh(event.desc.cookie_high));
			/* Keep looking for the expected response */
			continue;
		}

		err = (i40e_status)le32toh(event.desc.cookie_low);
		if (err) {
			err = EIO;
			goto out_alloc;
		} else
			break;
	}

	pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
	if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
	    ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
	    (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) {
		device_printf(dev, "Critical PF/VF API version mismatch!\n");
		err = EIO;
	} else {
		sc->version.major = pf_vvi->major;
		sc->version.minor = pf_vvi->minor;
	}

	/* Log PF/VF API versions */
	device_printf(dev, "PF API %d.%d / VF API %d.%d\n",
	    pf_vvi->major, pf_vvi->minor,
	    VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);

out_alloc:
	free(event.msg_buf, M_IAVF);
	return (err);
}


/*
** iavf_send_vf_config_msg
**
** Send VF configuration request admin queue message to the PF. The reply
** is not checked in this function. Returns 0 if the message was
** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
*/
int
iavf_send_vf_config_msg(struct iavf_sc *sc)
{
	u32	caps;

	caps = VIRTCHNL_VF_OFFLOAD_L2 |
	    VIRTCHNL_VF_OFFLOAD_RSS_PF |
	    VIRTCHNL_VF_OFFLOAD_VLAN;

	iavf_dbg_info(sc, "Sending offload flags: 0x%b\n",
	    caps, IAVF_PRINTF_VF_OFFLOAD_FLAGS);

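	/* An API 1.0 PF does not expect a capability bitmap with this request */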
	if (sc->version.minor == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
		return iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
				  NULL, 0);
	else
		return iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
				  (u8 *)&caps, sizeof(caps));
}

/*
** iavf_get_vf_config
**
** Get the VF configuration from the PF and populate the hw structure. Must be
** called after the admin queue is initialized. Busy waits until a response is
** received from the PF, up to a maximum timeout. The response from the PF is
** returned in the buffer for further processing by the caller.
*/
int
iavf_get_vf_config(struct iavf_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	struct i40e_arq_event_info event;
	u16 len;
	i40e_status err = 0;
	u32 retries = 0;

	/* Note this assumes a single VSI */
	len = sizeof(struct virtchnl_vf_resource) +
	    sizeof(struct virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = malloc(event.buf_len, M_IAVF, M_WAITOK);

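	/* Poll the ARQ for the GET_VF_RESOURCES reply, pausing between empty reads */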
	for (;;) {
		err = i40e_clean_arq_element(hw, &event, NULL);
		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			if (++retries <= IAVF_AQ_MAX_ERR)
				i40e_msec_pause(10);
		} else if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) !=
		    VIRTCHNL_OP_GET_VF_RESOURCES) {
			DDPRINTF(dev, "Received a response from PF,"
			    " opcode %d, error %d",
			    le32toh(event.desc.cookie_high),
			    le32toh(event.desc.cookie_low));
			retries++;
			continue;
		} else {
			err = (i40e_status)le32toh(event.desc.cookie_low);
			if (err) {
				device_printf(dev, "%s: Error returned from PF,"
				    " opcode %d, error %d\n", __func__,
				    le32toh(event.desc.cookie_high),
				    le32toh(event.desc.cookie_low));
				err = EIO;
				goto out_alloc;
			}
			/* We retrieved the config message, with no errors */
			break;
		}

		if (retries > IAVF_AQ_MAX_ERR) {
			INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
			    retries);
			err = ETIMEDOUT;
			goto out_alloc;
		}
	}

	memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
	i40e_vf_parse_hw_config(hw, sc->vf_res);

out_alloc:
	free(event.msg_buf, M_IAVF);
	return err;
}

/*
** iavf_configure_queues
**
** Request that the PF set up our queues.
*/
int
iavf_configure_queues(struct iavf_sc *sc)
{
	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi = &sc->vsi;
	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int			len, pairs;

	struct virtchnl_vsi_queue_config_info *vqci;
	struct virtchnl_queue_pair_info *vqpi;

	/* XXX: Linux PF driver wants matching IDs in each tx/rx struct, so both TX/RX
	 * queues of a pair need to be configured */
	pairs = max(vsi->num_tx_queues, vsi->num_rx_queues);
	len = sizeof(struct virtchnl_vsi_queue_config_info) +
		       (sizeof(struct virtchnl_queue_pair_info) * pairs);
	vqci = malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
	if (!vqci) {
		device_printf(dev, "%s: unable to allocate memory\n", __func__);
		return (ENOMEM);
	}
	vqci->vsi_id = sc->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	// TODO: the above is wrong now; X722 VFs can have 256 queues
	for (int i = 0; i < pairs; i++, tx_que++, rx_que++, vqpi++) {
		txr = &tx_que->txr;
		rxr = &rx_que->rxr;

		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = scctx->isc_ntxd[0];
		vqpi->txq.dma_ring_addr = txr->tx_paddr;
		/* Configure head writeback, if enabled */
		if (!vsi->enable_head_writeback) {
			vqpi->txq.headwb_enabled = 0;
			vqpi->txq.dma_headwb_addr = 0;
		} else {
			vqpi->txq.headwb_enabled = 1;
			vqpi->txq.dma_headwb_addr = txr->tx_paddr +
			    sizeof(struct i40e_tx_desc) * scctx->isc_ntxd[0];
		}

		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = scctx->isc_nrxd[0];
		vqpi->rxq.dma_ring_addr = rxr->rx_paddr;
		vqpi->rxq.max_pkt_size = scctx->isc_max_frame_size;
		vqpi->rxq.databuffer_size = rxr->mbuf_sz;
		vqpi->rxq.splithdr_enabled = 0;
	}

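	/* Hand the assembled queue-pair configuration to the PF in a single AQ message */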
	iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			   (u8 *)vqci, len);
	free(vqci, M_IAVF);

	return (0);
}

/*
** iavf_enable_queues
**
** Request that the PF enable all of our queues.
*/
int
iavf_enable_queues(struct iavf_sc *sc)
{
	struct virtchnl_queue_select vqs;

	vqs.vsi_id = sc->vsi_res->vsi_id;
	/* XXX: In Linux PF, as long as neither of these is 0,
	 * every queue in the VF VSI is enabled. */
	vqs.tx_queues = (1 << sc->vsi.num_tx_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	iavf_send_pf_msg(sc, VIRTCHNL_OP_ENABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
	return (0);
}

/*
** iavf_disable_queues
**
** Request that the PF disable all of our queues.
*/
int
iavf_disable_queues(struct iavf_sc *sc)
{
	struct virtchnl_queue_select vqs;

	vqs.vsi_id = sc->vsi_res->vsi_id;
	/* XXX: In Linux PF, as long as neither of these is 0,
	 * every queue in the VF VSI is disabled. */
	vqs.tx_queues = (1 << sc->vsi.num_tx_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	iavf_send_pf_msg(sc, VIRTCHNL_OP_DISABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
	return (0);
}

/*
** iavf_map_queues
**
** Request that the PF map queues to interrupt vectors. Misc causes, including
** the admin queue, are always mapped to vector 0.
*/
int
iavf_map_queues(struct iavf_sc *sc)
{
	struct virtchnl_irq_map_info *vm;
	int 			i, q, len;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
	if_softc_ctx_t		scctx = vsi->shared;
	device_t		dev = sc->dev;

	// XXX: What happens if we only get 1 MSI-X vector?
	MPASS(scctx->isc_vectors > 1);

	/* Number of queue vectors; the adminq uses one vector */
	// XXX: How do we know how many interrupt vectors we have?
	q = scctx->isc_vectors - 1;

	len = sizeof(struct virtchnl_irq_map_info) +
	      (scctx->isc_vectors * sizeof(struct virtchnl_vector_map));
	vm = malloc(len, M_IAVF, M_NOWAIT);
	if (!vm) {
		device_printf(dev, "%s: unable to allocate memory\n", __func__);
		return (ENOMEM);
	}

	vm->num_vectors = scctx->isc_vectors;
	/* Queue vectors first */
	for (i = 0; i < q; i++, rx_que++) {
		vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
		vm->vecmap[i].vector_id = i + 1; /* first is adminq */
		// TODO: Re-examine this
		vm->vecmap[i].txq_map = (1 << rx_que->rxr.me);
		vm->vecmap[i].rxq_map = (1 << rx_que->rxr.me);
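		/* RX causes on this vector use ITR index 0; TX causes use ITR index 1 */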
		vm->vecmap[i].rxitr_idx = 0;
		vm->vecmap[i].txitr_idx = 1;
	}

	/* Misc vector last - this is only for AdminQ messages */
	vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
	vm->vecmap[i].vector_id = 0;
	vm->vecmap[i].txq_map = 0;
	vm->vecmap[i].rxq_map = 0;
	vm->vecmap[i].rxitr_idx = 0;
	vm->vecmap[i].txitr_idx = 0;

	iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_IRQ_MAP,
	    (u8 *)vm, len);
	free(vm, M_IAVF);

	return (0);
}

/*
** Scan the filter list looking for VLANs that need
** to be added, then create the data to hand to the AQ
** for handling.
*/
int
iavf_add_vlans(struct iavf_sc *sc)
{
	struct virtchnl_vlan_filter_list *v;
	struct iavf_vlan_filter *f, *ftmp;
	device_t	dev = sc->dev;
	int		len, i = 0, cnt = 0;

	/* Get count of VLAN filters to add */
	SLIST_FOREACH(f, sc->vlan_filters, next) {
		if (f->flags & IAVF_FILTER_ADD)
			cnt++;
	}

	if (!cnt) /* no work... */
		return (ENOENT);

	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (cnt * sizeof(u16));

	if (len > IXL_AQ_BUF_SZ) {
		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
			__func__);
		return (EFBIG);
	}

	v = malloc(len, M_IAVF, M_NOWAIT);
	if (!v) {
		device_printf(dev, "%s: unable to allocate memory\n",
			__func__);
		return (ENOMEM);
	}

	v->vsi_id = sc->vsi_res->vsi_id;
	v->num_elements = cnt;

	/* Scan the filter list */
	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
		if (f->flags & IAVF_FILTER_ADD) {
			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
			f->flags = IAVF_FILTER_USED;
			i++;
		}
		if (i == cnt)
			break;
	}

	iavf_send_pf_msg(sc, VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
	free(v, M_IAVF);
	/* add stats? */
	return (0);
}

/*
** Scan the filter table looking for VLANs that need
** to be removed, then create the data to hand to the AQ
** for handling.
*/
int
iavf_del_vlans(struct iavf_sc *sc)
{
	struct virtchnl_vlan_filter_list *v;
	struct iavf_vlan_filter *f, *ftmp;
	device_t dev = sc->dev;
	int len, i = 0, cnt = 0;

	/* Get count of VLAN filters to delete */
	SLIST_FOREACH(f, sc->vlan_filters, next) {
		if (f->flags & IAVF_FILTER_DEL)
			cnt++;
	}

	if (!cnt) /* no work... */
		return (ENOENT);

	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (cnt * sizeof(u16));

	if (len > IXL_AQ_BUF_SZ) {
		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
			__func__);
		return (EFBIG);
	}

	v = malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
	if (!v) {
		device_printf(dev, "%s: unable to allocate memory\n",
			__func__);
		return (ENOMEM);
	}

	v->vsi_id = sc->vsi_res->vsi_id;
	v->num_elements = cnt;

	/* Scan the filter list */
	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
		if (f->flags & IAVF_FILTER_DEL) {
			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
			i++;
			SLIST_REMOVE(sc->vlan_filters, f, iavf_vlan_filter, next);
			free(f, M_IAVF);
		}
		if (i == cnt)
			break;
	}

	iavf_send_pf_msg(sc, VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
	free(v, M_IAVF);
	/* add stats? */
	return (0);
}


/*
** This routine takes additions to the VSI filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
int
iavf_add_ether_filters(struct iavf_sc *sc)
{
	struct virtchnl_ether_addr_list *a;
	struct iavf_mac_filter	*f;
	device_t dev = sc->dev;
	int len, j = 0, cnt = 0;
	enum i40e_status_code status;

	/* Get count of MAC addresses to add */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IAVF_FILTER_ADD)
			cnt++;
	}
	if (cnt == 0) { /* Should not happen... */
		iavf_dbg_vc(sc, "%s: cnt == 0, exiting...\n", __func__);
		return (ENOENT);
	}

	len = sizeof(struct virtchnl_ether_addr_list) +
	    (cnt * sizeof(struct virtchnl_ether_addr));

	a = malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "%s: Failed to get memory for "
		    "virtchnl_ether_addr_list\n", __func__);
		return (ENOMEM);
	}
	a->vsi_id = sc->vsi.id;
	a->num_elements = cnt;

	/* Scan the filter list */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IAVF_FILTER_ADD) {
			bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
			f->flags &= ~IAVF_FILTER_ADD;
			j++;

			iavf_dbg_vc(sc, "ADD: " MAC_FORMAT "\n",
			    MAC_FORMAT_ARGS(f->macaddr));
		}
		if (j == cnt)
			break;
	}
	DDPRINTF(dev, "len %d, j %d, cnt %d",
	    len, j, cnt);

	status = iavf_send_pf_msg(sc,
	    VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)a, len);
	/* add stats? */
	free(a, M_IAVF);
	return (status);
}

/*
** This routine takes filters flagged for deletion in the
** sc MAC filter list and creates an Admin Queue call
** to delete those filters in the hardware.
*/
int
iavf_del_ether_filters(struct iavf_sc *sc)
{
	struct virtchnl_ether_addr_list *d;
	struct iavf_mac_filter *f, *f_temp;
	device_t dev = sc->dev;
	int len, j = 0, cnt = 0;

	/* Get count of MAC addresses to delete */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IAVF_FILTER_DEL)
			cnt++;
	}
	if (cnt == 0) {
		iavf_dbg_vc(sc, "%s: cnt == 0, exiting...\n", __func__);
		return (ENOENT);
	}

	len = sizeof(struct virtchnl_ether_addr_list) +
	    (cnt * sizeof(struct virtchnl_ether_addr));

	d = malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: Failed to get memory for "
		    "virtchnl_ether_addr_list\n", __func__);
		return (ENOMEM);
	}
	d->vsi_id = sc->vsi.id;
	d->num_elements = cnt;

	/* Scan the filter list */
	SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
		if (f->flags & IAVF_FILTER_DEL) {
			bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
			iavf_dbg_vc(sc, "DEL: " MAC_FORMAT "\n",
			    MAC_FORMAT_ARGS(f->macaddr));
			j++;
			SLIST_REMOVE(sc->mac_filters, f, iavf_mac_filter, next);
			free(f, M_IAVF);
		}
		if (j == cnt)
			break;
	}
	iavf_send_pf_msg(sc,
	    VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)d, len);
	/* add stats? */
	free(d, M_IAVF);
	return (0);
}

/*
** iavf_request_reset
** Request that the PF reset this VF. No response is expected.
*/
int
iavf_request_reset(struct iavf_sc *sc)
{
	/*
	** Set the reset status to "in progress" before
	** the request; this avoids any possibility of
	** a mistaken early detection of completion.
	*/
	wr32(&sc->hw, I40E_VFGEN_RSTAT, VIRTCHNL_VFR_INPROGRESS);
	iavf_send_pf_msg(sc, VIRTCHNL_OP_RESET_VF, NULL, 0);
	return (0);
}

/*
** iavf_request_stats
** Request the statistics for this VF's VSI from the PF.
*/
int
iavf_request_stats(struct iavf_sc *sc)
{
	struct virtchnl_queue_select vqs;
	int error = 0;

	vqs.vsi_id = sc->vsi_res->vsi_id;
	/* Low priority; we don't need to error check */
	error = iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_STATS,
	    (u8 *)&vqs, sizeof(vqs));
	if (error)
		device_printf(sc->dev, "Error sending stats request to PF: %d\n", error);

	return (0);
}

/*
** Update the driver's stats counters with the VSI stats returned from the PF.
*/
void
iavf_update_stats_counters(struct iavf_sc *sc, struct i40e_eth_stats *es)
{
	struct ixl_vsi *vsi = &sc->vsi;
	uint64_t tx_discards;

	tx_discards = es->tx_discards;

	/* Update ifnet stats */
	IXL_SET_IPACKETS(vsi, es->rx_unicast +
	    es->rx_multicast +
	    es->rx_broadcast);
	IXL_SET_OPACKETS(vsi, es->tx_unicast +
	    es->tx_multicast +
	    es->tx_broadcast);
	IXL_SET_IBYTES(vsi, es->rx_bytes);
	IXL_SET_OBYTES(vsi, es->tx_bytes);
	IXL_SET_IMCASTS(vsi, es->rx_multicast);
	IXL_SET_OMCASTS(vsi, es->tx_multicast);

	IXL_SET_OERRORS(vsi, es->tx_errors);
	IXL_SET_IQDROPS(vsi, es->rx_discards);
	IXL_SET_OQDROPS(vsi, tx_discards);
	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
	IXL_SET_COLLISIONS(vsi, 0);

	vsi->eth_stats = *es;
}

int
iavf_config_rss_key(struct iavf_sc *sc)
{
	struct virtchnl_rss_key *rss_key_msg;
	int msg_len, key_length;
	u8		rss_seed[IXL_RSS_KEY_SIZE];

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#else
	ixl_get_default_rss_key((u32 *)rss_seed);
#endif

	/* Send the fetched key */
	key_length = IXL_RSS_KEY_SIZE;
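	/* The virtchnl_rss_key struct already reserves one key byte, hence the - 1 */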
	msg_len = sizeof(struct virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
	rss_key_msg = malloc(msg_len, M_IAVF, M_NOWAIT | M_ZERO);
	if (rss_key_msg == NULL) {
		device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n");
		return (ENOMEM);
	}

	rss_key_msg->vsi_id = sc->vsi_res->vsi_id;
	rss_key_msg->key_len = key_length;
	bcopy(rss_seed, &rss_key_msg->key[0], key_length);

	iavf_dbg_vc(sc, "config_rss: vsi_id %d, key_len %d\n",
	    rss_key_msg->vsi_id, rss_key_msg->key_len);

	iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_KEY,
			  (u8 *)rss_key_msg, msg_len);

	free(rss_key_msg, M_IAVF);
	return (0);
}

int
iavf_set_rss_hena(struct iavf_sc *sc)
{
	struct virtchnl_rss_hena hena;
	struct i40e_hw *hw = &sc->hw;

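	/* The X722 supports additional RSS hash types, so it gets a different default HENA */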
	if (hw->mac.type == I40E_MAC_X722_VF)
		hena.hena = IXL_DEFAULT_RSS_HENA_X722;
	else
		hena.hena = IXL_DEFAULT_RSS_HENA_XL710;

	iavf_send_pf_msg(sc, VIRTCHNL_OP_SET_RSS_HENA,
			  (u8 *)&hena, sizeof(hena));
	return (0);
}

int
iavf_config_rss_lut(struct iavf_sc *sc)
{
	struct virtchnl_rss_lut *rss_lut_msg;
	int msg_len;
	u16 lut_length;
	u32 lut;
	int i, que_id;

	lut_length = IXL_RSS_VSI_LUT_SIZE;
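	/* As with the RSS key, the struct already reserves one LUT byte, hence the - 1 */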
	msg_len = sizeof(struct virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
	rss_lut_msg = malloc(msg_len, M_IAVF, M_NOWAIT | M_ZERO);
	if (rss_lut_msg == NULL) {
		device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n");
		return (ENOMEM);
	}

	rss_lut_msg->vsi_id = sc->vsi_res->vsi_id;
	/* Each LUT entry is a max of 1 byte, so this is easy */
	rss_lut_msg->lut_entries = lut_length;

	/* Populate the LUT with RX queue indices in round-robin fashion */
	for (i = 0; i < lut_length; i++) {
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_rx_queues).
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % sc->vsi.num_rx_queues;
#else
		que_id = i % sc->vsi.num_rx_queues;
#endif
		lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK;
		rss_lut_msg->lut[i] = lut;
	}

	iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_LUT,
			  (u8 *)rss_lut_msg, msg_len);

	free(rss_lut_msg, M_IAVF);
	return (0);
}

int
iavf_config_promisc_mode(struct iavf_sc *sc)
{
	struct virtchnl_promisc_info pinfo;

	pinfo.vsi_id = sc->vsi_res->vsi_id;
	pinfo.flags = sc->promisc_flags;

	iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
	    (u8 *)&pinfo, sizeof(pinfo));
	return (0);
}

/*
** iavf_vc_completion
**
** Asynchronous completion function for admin queue messages. Rather than busy
** waiting, we fire off our requests and assume that no errors will be returned.
** This function handles the reply messages.
*/
void
iavf_vc_completion(struct iavf_sc *sc,
    enum virtchnl_ops v_opcode,
    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	device_t	dev = sc->dev;

	if (v_opcode != VIRTCHNL_OP_GET_STATS)
		iavf_dbg_vc(sc, "%s: opcode %s\n", __func__,
		    ixl_vc_opcode_str(v_opcode));

	if (v_opcode == VIRTCHNL_OP_EVENT) {
		struct virtchnl_pf_event *vpe =
			(struct virtchnl_pf_event *)msg;

		switch (vpe->event) {
		case VIRTCHNL_EVENT_LINK_CHANGE:
			iavf_dbg_vc(sc, "Link change: status %d, speed %s\n",
			    vpe->event_data.link_event.link_status,
			    iavf_vc_speed_to_string(vpe->event_data.link_event.link_speed));
			sc->link_up =
				vpe->event_data.link_event.link_status;
			sc->link_speed =
				vpe->event_data.link_event.link_speed;
			iavf_update_link_status(sc);
			break;
		case VIRTCHNL_EVENT_RESET_IMPENDING:
			device_printf(dev, "PF initiated reset!\n");
			sc->init_state = IAVF_RESET_PENDING;
			iavf_if_init(sc->vsi.ctx);
			break;
		default:
			iavf_dbg_vc(sc, "Unknown event %d from AQ\n",
				vpe->event);
			break;
		}

		return;
	}

	/* Catch-all error response */
	if (v_retval) {
		device_printf(dev,
		    "%s: AQ returned error %s to our request %s!\n",
		    __func__, i40e_vc_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode));
	}

	switch (v_opcode) {
	case VIRTCHNL_OP_GET_STATS:
		iavf_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		if (v_retval) {
			device_printf(dev, "WARNING: Error adding VF mac filter!\n");
			device_printf(dev, "WARNING: Device may not receive traffic!\n");
		}
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
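		/* Mark the queues as enabled and wake any thread sleeping on this op's channel */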
		atomic_store_rel_32(&sc->queues_enabled, 1);
		wakeup_one(&sc->enable_queues_chan);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		atomic_store_rel_32(&sc->queues_enabled, 0);
		wakeup_one(&sc->disable_queues_chan);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		break;
	default:
		iavf_dbg_vc(sc,
		    "Received unexpected message %s from PF.\n",
		    ixl_vc_opcode_str(v_opcode));
		break;
	}
}

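/*
** ixl_vc_send_cmd
**
** Dispatch a single deferred admin queue request flag to the matching
** virtchnl request function above.
*/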
int
ixl_vc_send_cmd(struct iavf_sc *sc, uint32_t request)
{

	switch (request) {
	case IAVF_FLAG_AQ_MAP_VECTORS:
		return iavf_map_queues(sc);

	case IAVF_FLAG_AQ_ADD_MAC_FILTER:
		return iavf_add_ether_filters(sc);

	case IAVF_FLAG_AQ_ADD_VLAN_FILTER:
		return iavf_add_vlans(sc);

	case IAVF_FLAG_AQ_DEL_MAC_FILTER:
		return iavf_del_ether_filters(sc);

	case IAVF_FLAG_AQ_DEL_VLAN_FILTER:
		return iavf_del_vlans(sc);

	case IAVF_FLAG_AQ_CONFIGURE_QUEUES:
		return iavf_configure_queues(sc);

	case IAVF_FLAG_AQ_DISABLE_QUEUES:
		return iavf_disable_queues(sc);

	case IAVF_FLAG_AQ_ENABLE_QUEUES:
		return iavf_enable_queues(sc);

	case IAVF_FLAG_AQ_CONFIG_RSS_KEY:
		return iavf_config_rss_key(sc);

	case IAVF_FLAG_AQ_SET_RSS_HENA:
		return iavf_set_rss_hena(sc);

	case IAVF_FLAG_AQ_CONFIG_RSS_LUT:
		return iavf_config_rss_lut(sc);

	case IAVF_FLAG_AQ_CONFIGURE_PROMISC:
		return iavf_config_promisc_mode(sc);
	}

	return (0);
}

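/*
** ixl_vc_get_op_chan
**
** Return the wait channel associated with a request, or NULL if the request
** has no completion channel to sleep on.
*/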
void *
ixl_vc_get_op_chan(struct iavf_sc *sc, uint32_t request)
{
	switch (request) {
	case IAVF_FLAG_AQ_ENABLE_QUEUES:
		return (&sc->enable_queues_chan);
	case IAVF_FLAG_AQ_DISABLE_QUEUES:
		return (&sc->disable_queues_chan);
	default:
		return (NULL);
	}
}
