1270631Sjfv/******************************************************************************
2270631Sjfv
3292095Ssmh  Copyright (c) 2013-2015, Intel Corporation
4270631Sjfv  All rights reserved.
5270631Sjfv
6270631Sjfv  Redistribution and use in source and binary forms, with or without
7270631Sjfv  modification, are permitted provided that the following conditions are met:
8270631Sjfv
9270631Sjfv   1. Redistributions of source code must retain the above copyright notice,
10270631Sjfv      this list of conditions and the following disclaimer.
11270631Sjfv
12270631Sjfv   2. Redistributions in binary form must reproduce the above copyright
13270631Sjfv      notice, this list of conditions and the following disclaimer in the
14270631Sjfv      documentation and/or other materials provided with the distribution.
15270631Sjfv
16270631Sjfv   3. Neither the name of the Intel Corporation nor the names of its
17270631Sjfv      contributors may be used to endorse or promote products derived from
18270631Sjfv      this software without specific prior written permission.
19270631Sjfv
20270631Sjfv  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21270631Sjfv  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22270631Sjfv  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23270631Sjfv  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24270631Sjfv  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25270631Sjfv  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26270631Sjfv  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27270631Sjfv  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28270631Sjfv  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29270631Sjfv  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30270631Sjfv  POSSIBILITY OF SUCH DAMAGE.
31270631Sjfv
32270631Sjfv******************************************************************************/
33270631Sjfv/*$FreeBSD$*/
34270631Sjfv
35270631Sjfv/*
36270631Sjfv**	Virtual Channel support
37270631Sjfv**		These are support functions to communication
38270631Sjfv**		between the VF and PF drivers.
39270631Sjfv*/
40270631Sjfv
41270631Sjfv#include "ixl.h"
42270631Sjfv#include "ixlv.h"
43270631Sjfv#include "i40e_prototype.h"
44270631Sjfv
45270631Sjfv
46270631Sjfv/* busy wait delay in msec */
47270631Sjfv#define IXLV_BUSY_WAIT_DELAY 10
48270631Sjfv#define IXLV_BUSY_WAIT_COUNT 50
49270631Sjfv
50274360Sjfvstatic void	ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t,
51274360Sjfv		    enum i40e_status_code);
52274360Sjfvstatic void	ixl_vc_process_next(struct ixl_vc_mgr *mgr);
53274360Sjfvstatic void	ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr);
54274360Sjfvstatic void	ixl_vc_send_current(struct ixl_vc_mgr *mgr);
55274360Sjfv
56274360Sjfv#ifdef IXL_DEBUG
57270631Sjfv/*
58270631Sjfv** Validate VF messages
59270631Sjfv*/
60270631Sjfvstatic int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
61270631Sjfv    u8 *msg, u16 msglen)
62270631Sjfv{
63270631Sjfv	bool err_msg_format = false;
64270631Sjfv	int valid_len;
65270631Sjfv
66270631Sjfv	/* Validate message length. */
67270631Sjfv	switch (v_opcode) {
68270631Sjfv	case I40E_VIRTCHNL_OP_VERSION:
69270631Sjfv		valid_len = sizeof(struct i40e_virtchnl_version_info);
70270631Sjfv		break;
71270631Sjfv	case I40E_VIRTCHNL_OP_RESET_VF:
72270631Sjfv	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
73270631Sjfv		valid_len = 0;
74270631Sjfv		break;
75270631Sjfv	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
76270631Sjfv		valid_len = sizeof(struct i40e_virtchnl_txq_info);
77270631Sjfv		break;
78270631Sjfv	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
79270631Sjfv		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
80270631Sjfv		break;
81270631Sjfv	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
82270631Sjfv		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
83270631Sjfv		if (msglen >= valid_len) {
84270631Sjfv			struct i40e_virtchnl_vsi_queue_config_info *vqc =
85270631Sjfv			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
86270631Sjfv			valid_len += (vqc->num_queue_pairs *
87270631Sjfv				      sizeof(struct
88270631Sjfv					     i40e_virtchnl_queue_pair_info));
89270631Sjfv			if (vqc->num_queue_pairs == 0)
90270631Sjfv				err_msg_format = true;
91270631Sjfv		}
92270631Sjfv		break;
93270631Sjfv	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
94270631Sjfv		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
95270631Sjfv		if (msglen >= valid_len) {
96270631Sjfv			struct i40e_virtchnl_irq_map_info *vimi =
97270631Sjfv			    (struct i40e_virtchnl_irq_map_info *)msg;
98270631Sjfv			valid_len += (vimi->num_vectors *
99270631Sjfv				      sizeof(struct i40e_virtchnl_vector_map));
100270631Sjfv			if (vimi->num_vectors == 0)
101270631Sjfv				err_msg_format = true;
102270631Sjfv		}
103270631Sjfv		break;
104270631Sjfv	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
105270631Sjfv	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
106270631Sjfv		valid_len = sizeof(struct i40e_virtchnl_queue_select);
107270631Sjfv		break;
108270631Sjfv	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
109270631Sjfv	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
110270631Sjfv		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
111270631Sjfv		if (msglen >= valid_len) {
112270631Sjfv			struct i40e_virtchnl_ether_addr_list *veal =
113270631Sjfv			    (struct i40e_virtchnl_ether_addr_list *)msg;
114270631Sjfv			valid_len += veal->num_elements *
115270631Sjfv			    sizeof(struct i40e_virtchnl_ether_addr);
116270631Sjfv			if (veal->num_elements == 0)
117270631Sjfv				err_msg_format = true;
118270631Sjfv		}
119270631Sjfv		break;
120270631Sjfv	case I40E_VIRTCHNL_OP_ADD_VLAN:
121270631Sjfv	case I40E_VIRTCHNL_OP_DEL_VLAN:
122270631Sjfv		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
123270631Sjfv		if (msglen >= valid_len) {
124270631Sjfv			struct i40e_virtchnl_vlan_filter_list *vfl =
125270631Sjfv			    (struct i40e_virtchnl_vlan_filter_list *)msg;
126270631Sjfv			valid_len += vfl->num_elements * sizeof(u16);
127270631Sjfv			if (vfl->num_elements == 0)
128270631Sjfv				err_msg_format = true;
129270631Sjfv		}
130270631Sjfv		break;
131270631Sjfv	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
132270631Sjfv		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
133270631Sjfv		break;
134270631Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
135270631Sjfv		valid_len = sizeof(struct i40e_virtchnl_queue_select);
136270631Sjfv		break;
137270631Sjfv	/* These are always errors coming from the VF. */
138270631Sjfv	case I40E_VIRTCHNL_OP_EVENT:
139270631Sjfv	case I40E_VIRTCHNL_OP_UNKNOWN:
140270631Sjfv	default:
141270631Sjfv		return EPERM;
142270631Sjfv		break;
143270631Sjfv	}
144270631Sjfv	/* few more checks */
145270631Sjfv	if ((valid_len != msglen) || (err_msg_format))
146270631Sjfv		return EINVAL;
147270631Sjfv	else
148270631Sjfv		return 0;
149270631Sjfv}
150274360Sjfv#endif
151270631Sjfv
152270631Sjfv/*
153270631Sjfv** ixlv_send_pf_msg
154270631Sjfv**
155270631Sjfv** Send message to PF and print status if failure.
156270631Sjfv*/
157270631Sjfvstatic int
158270631Sjfvixlv_send_pf_msg(struct ixlv_sc *sc,
159270631Sjfv	enum i40e_virtchnl_ops op, u8 *msg, u16 len)
160270631Sjfv{
161270631Sjfv	struct i40e_hw	*hw = &sc->hw;
162270631Sjfv	device_t	dev = sc->dev;
163270631Sjfv	i40e_status	err;
164270631Sjfv
165274360Sjfv#ifdef IXL_DEBUG
166270631Sjfv	/*
167274360Sjfv	** Pre-validating messages to the PF
168270631Sjfv	*/
169274360Sjfv	int val_err;
170270631Sjfv	val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
171270631Sjfv	if (val_err)
172270631Sjfv		device_printf(dev, "Error validating msg to PF for op %d,"
173270631Sjfv		    " msglen %d: error %d\n", op, len, val_err);
174274360Sjfv#endif
175270631Sjfv
176270631Sjfv	err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
177270631Sjfv	if (err)
178270631Sjfv		device_printf(dev, "Unable to send opcode %d to PF, "
179270631Sjfv		    "error %d, aq status %d\n", op, err, hw->aq.asq_last_status);
180270631Sjfv	return err;
181270631Sjfv}
182270631Sjfv
183270631Sjfv
184270631Sjfv/*
185270631Sjfv** ixlv_send_api_ver
186270631Sjfv**
187270631Sjfv** Send API version admin queue message to the PF. The reply is not checked
188270631Sjfv** in this function. Returns 0 if the message was successfully
189270631Sjfv** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
190270631Sjfv*/
191270631Sjfvint
192270631Sjfvixlv_send_api_ver(struct ixlv_sc *sc)
193270631Sjfv{
194270631Sjfv	struct i40e_virtchnl_version_info vvi;
195270631Sjfv
196270631Sjfv	vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
197270631Sjfv	vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
198270631Sjfv
199270631Sjfv	return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
200270631Sjfv	    (u8 *)&vvi, sizeof(vvi));
201270631Sjfv}
202270631Sjfv
/*
** ixlv_verify_api_ver
**
** Compare API versions with the PF. Must be called after admin queue is
** initialized. Returns 0 if API versions match, EIO if
** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
*/
int
ixlv_verify_api_ver(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_version_info *pf_vvi;
	struct i40e_hw *hw = &sc->hw;
	struct i40e_arq_event_info event;
	i40e_status err;
	int retries = 0;

	event.buf_len = IXL_AQ_BUFSZ;
	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
	if (!event.msg_buf) {
		err = ENOMEM;
		goto out;
	}

	/* Poll the ARQ until the VERSION reply arrives, bounded by
	 * IXLV_AQ_MAX_ERR attempts with a 100ms delay between each. */
	do {
		if (++retries > IXLV_AQ_MAX_ERR)
			goto out_alloc;

		/* NOTE: initial delay is necessary */
		i40e_msec_delay(100);
		err = i40e_clean_arq_element(hw, &event, NULL);
	} while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK);
	if (err)
		goto out_alloc;

	/* The PF reports its processing status in cookie_low */
	err = (i40e_status)le32toh(event.desc.cookie_low);
	if (err) {
		err = EIO;
		goto out_alloc;
	}

	/* cookie_high carries the opcode this event answers */
	if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
	    I40E_VIRTCHNL_OP_VERSION) {
		DDPRINTF(sc->dev, "Received unexpected op response: %d\n",
		    le32toh(event.desc.cookie_high));
		err = EIO;
		goto out_alloc;
	}

	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
	/* Fail if the PF speaks a newer API than this driver; otherwise
	 * remember the PF's minor version for later capability decisions. */
	if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
	    ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
	    (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
		err = EIO;
	else
		sc->pf_version = pf_vvi->minor;

out_alloc:
	free(event.msg_buf, M_DEVBUF);
out:
	return err;
}
264270631Sjfv
265270631Sjfv/*
266270631Sjfv** ixlv_send_vf_config_msg
267270631Sjfv**
268270631Sjfv** Send VF configuration request admin queue message to the PF. The reply
269270631Sjfv** is not checked in this function. Returns 0 if the message was
270270631Sjfv** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
271270631Sjfv*/
272270631Sjfvint
273270631Sjfvixlv_send_vf_config_msg(struct ixlv_sc *sc)
274270631Sjfv{
275292100Ssmh	u32	caps;
276292100Ssmh
277292100Ssmh	caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
278292100Ssmh	    I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
279292100Ssmh	    I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
280292100Ssmh	    I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
281292100Ssmh
282292100Ssmh	if (sc->pf_version)
283292100Ssmh		return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
284292100Ssmh				  (u8 *)&caps, sizeof(caps));
285292100Ssmh	else
286292100Ssmh		return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
287270631Sjfv				  NULL, 0);
288270631Sjfv}
289270631Sjfv
/*
** ixlv_get_vf_config
**
** Get VF configuration from PF and populate hw structure. Must be called after
** admin queue is initialized. Busy waits until response is received from PF,
** with maximum timeout. Response from PF is returned in the buffer for further
** processing by the caller.
*/
int
ixlv_get_vf_config(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	struct i40e_arq_event_info event;
	u16 len;
	i40e_status err = 0;
	u32 retries = 0;

	/* Note this assumes a single VSI */
	len = sizeof(struct i40e_virtchnl_vf_resource) +
	    sizeof(struct i40e_virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
	if (!event.msg_buf) {
		err = ENOMEM;
		goto out;
	}

	/* Poll the ARQ until the GET_VF_RESOURCES reply arrives or the
	 * retry budget (IXLV_AQ_MAX_ERR) is exhausted. */
	for (;;) {
		err = i40e_clean_arq_element(hw, &event, NULL);
		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			/* Nothing queued yet; back off briefly and retry */
			if (++retries <= IXLV_AQ_MAX_ERR)
				i40e_msec_delay(10);
		} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
		    I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
			/* Reply to some other opcode; log it and keep waiting.
			 * NOTE(review): this 'continue' skips the retry-limit
			 * check below for this iteration, so the bound is only
			 * enforced on the NO_WORK path — confirm intended. */
			DDPRINTF(dev, "Received a response from PF,"
			    " opcode %d, error %d",
			    le32toh(event.desc.cookie_high),
			    le32toh(event.desc.cookie_low));
			retries++;
			continue;
		} else {
			/* Right opcode; cookie_low holds the PF's status */
			err = (i40e_status)le32toh(event.desc.cookie_low);
			if (err) {
				device_printf(dev, "%s: Error returned from PF,"
				    " opcode %d, error %d\n", __func__,
				    le32toh(event.desc.cookie_high),
				    le32toh(event.desc.cookie_low));
				err = EIO;
				goto out_alloc;
			}
			/* We retrieved the config message, with no errors */
			break;
		}

		if (retries > IXLV_AQ_MAX_ERR) {
			INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
			    retries);
			err = ETIMEDOUT;
			goto out_alloc;
		}
	}

	/* Copy out no more than the caller's buffer can hold, then let the
	 * shared code digest the resource data into the hw struct. */
	memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
	i40e_vf_parse_hw_config(hw, sc->vf_res);

out_alloc:
	free(event.msg_buf, M_DEVBUF);
out:
	return err;
}
361270631Sjfv
362270631Sjfv/*
363270631Sjfv** ixlv_configure_queues
364270631Sjfv**
365270631Sjfv** Request that the PF set up our queues.
366270631Sjfv*/
367270631Sjfvvoid
368270631Sjfvixlv_configure_queues(struct ixlv_sc *sc)
369270631Sjfv{
370270631Sjfv	device_t		dev = sc->dev;
371270631Sjfv	struct ixl_vsi		*vsi = &sc->vsi;
372270631Sjfv	struct ixl_queue	*que = vsi->queues;
373270631Sjfv	struct tx_ring		*txr;
374270631Sjfv	struct rx_ring		*rxr;
375274360Sjfv	int			len, pairs;
376270631Sjfv
377270631Sjfv	struct i40e_virtchnl_vsi_queue_config_info *vqci;
378270631Sjfv	struct i40e_virtchnl_queue_pair_info *vqpi;
379292097Ssmh
380270631Sjfv	pairs = vsi->num_queues;
381270631Sjfv	len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
382270631Sjfv		       (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
383270631Sjfv	vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
384270631Sjfv	if (!vqci) {
385270631Sjfv		device_printf(dev, "%s: unable to allocate memory\n", __func__);
386274360Sjfv		ixl_vc_schedule_retry(&sc->vc_mgr);
387270631Sjfv		return;
388270631Sjfv	}
389270631Sjfv	vqci->vsi_id = sc->vsi_res->vsi_id;
390270631Sjfv	vqci->num_queue_pairs = pairs;
391270631Sjfv	vqpi = vqci->qpair;
392270631Sjfv	/* Size check is not needed here - HW max is 16 queue pairs, and we
393270631Sjfv	 * can fit info for 31 of them into the AQ buffer before it overflows.
394270631Sjfv	 */
395274360Sjfv	for (int i = 0; i < pairs; i++, que++, vqpi++) {
396270631Sjfv		txr = &que->txr;
397270631Sjfv		rxr = &que->rxr;
398270631Sjfv		vqpi->txq.vsi_id = vqci->vsi_id;
399270631Sjfv		vqpi->txq.queue_id = i;
400270631Sjfv		vqpi->txq.ring_len = que->num_desc;
401270631Sjfv		vqpi->txq.dma_ring_addr = txr->dma.pa;
402270631Sjfv		/* Enable Head writeback */
403270631Sjfv		vqpi->txq.headwb_enabled = 1;
404270631Sjfv		vqpi->txq.dma_headwb_addr = txr->dma.pa +
405270631Sjfv		    (que->num_desc * sizeof(struct i40e_tx_desc));
406270631Sjfv
407270631Sjfv		vqpi->rxq.vsi_id = vqci->vsi_id;
408270631Sjfv		vqpi->rxq.queue_id = i;
409270631Sjfv		vqpi->rxq.ring_len = que->num_desc;
410270631Sjfv		vqpi->rxq.dma_ring_addr = rxr->dma.pa;
411270631Sjfv		vqpi->rxq.max_pkt_size = vsi->max_frame_size;
412270631Sjfv		vqpi->rxq.databuffer_size = rxr->mbuf_sz;
413274360Sjfv		vqpi->rxq.splithdr_enabled = 0;
414270631Sjfv	}
415270631Sjfv
416270631Sjfv	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
417270631Sjfv			   (u8 *)vqci, len);
418270631Sjfv	free(vqci, M_DEVBUF);
419270631Sjfv}
420270631Sjfv
421270631Sjfv/*
422270631Sjfv** ixlv_enable_queues
423270631Sjfv**
424270631Sjfv** Request that the PF enable all of our queues.
425270631Sjfv*/
426270631Sjfvvoid
427270631Sjfvixlv_enable_queues(struct ixlv_sc *sc)
428270631Sjfv{
429270631Sjfv	struct i40e_virtchnl_queue_select vqs;
430270631Sjfv
431270631Sjfv	vqs.vsi_id = sc->vsi_res->vsi_id;
432270631Sjfv	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
433270631Sjfv	vqs.rx_queues = vqs.tx_queues;
434270631Sjfv	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
435270631Sjfv			   (u8 *)&vqs, sizeof(vqs));
436270631Sjfv}
437270631Sjfv
438270631Sjfv/*
439270631Sjfv** ixlv_disable_queues
440270631Sjfv**
441270631Sjfv** Request that the PF disable all of our queues.
442270631Sjfv*/
443270631Sjfvvoid
444270631Sjfvixlv_disable_queues(struct ixlv_sc *sc)
445270631Sjfv{
446270631Sjfv	struct i40e_virtchnl_queue_select vqs;
447270631Sjfv
448270631Sjfv	vqs.vsi_id = sc->vsi_res->vsi_id;
449270631Sjfv	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
450270631Sjfv	vqs.rx_queues = vqs.tx_queues;
451270631Sjfv	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
452270631Sjfv			   (u8 *)&vqs, sizeof(vqs));
453270631Sjfv}
454270631Sjfv
455270631Sjfv/*
456270631Sjfv** ixlv_map_queues
457270631Sjfv**
458270631Sjfv** Request that the PF map queues to interrupt vectors. Misc causes, including
459270631Sjfv** admin queue, are always mapped to vector 0.
460270631Sjfv*/
461270631Sjfvvoid
462270631Sjfvixlv_map_queues(struct ixlv_sc *sc)
463270631Sjfv{
464270631Sjfv	struct i40e_virtchnl_irq_map_info *vm;
465270631Sjfv	int 			i, q, len;
466270631Sjfv	struct ixl_vsi		*vsi = &sc->vsi;
467270631Sjfv	struct ixl_queue	*que = vsi->queues;
468270631Sjfv
469270631Sjfv	/* How many queue vectors, adminq uses one */
470270631Sjfv	q = sc->msix - 1;
471270631Sjfv
472270631Sjfv	len = sizeof(struct i40e_virtchnl_irq_map_info) +
473270631Sjfv	      (sc->msix * sizeof(struct i40e_virtchnl_vector_map));
474270631Sjfv	vm = malloc(len, M_DEVBUF, M_NOWAIT);
475270631Sjfv	if (!vm) {
476270631Sjfv		printf("%s: unable to allocate memory\n", __func__);
477274360Sjfv		ixl_vc_schedule_retry(&sc->vc_mgr);
478270631Sjfv		return;
479270631Sjfv	}
480270631Sjfv
481270631Sjfv	vm->num_vectors = sc->msix;
482270631Sjfv	/* Queue vectors first */
483270631Sjfv	for (i = 0; i < q; i++, que++) {
484270631Sjfv		vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
485270631Sjfv		vm->vecmap[i].vector_id = i + 1; /* first is adminq */
486270631Sjfv		vm->vecmap[i].txq_map = (1 << que->me);
487270631Sjfv		vm->vecmap[i].rxq_map = (1 << que->me);
488274360Sjfv		vm->vecmap[i].rxitr_idx = 0;
489274360Sjfv		vm->vecmap[i].txitr_idx = 0;
490270631Sjfv	}
491270631Sjfv
492270631Sjfv	/* Misc vector last - this is only for AdminQ messages */
493270631Sjfv	vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
494270631Sjfv	vm->vecmap[i].vector_id = 0;
495270631Sjfv	vm->vecmap[i].txq_map = 0;
496270631Sjfv	vm->vecmap[i].rxq_map = 0;
497274360Sjfv	vm->vecmap[i].rxitr_idx = 0;
498274360Sjfv	vm->vecmap[i].txitr_idx = 0;
499270631Sjfv
500270631Sjfv	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
501270631Sjfv	    (u8 *)vm, len);
502270631Sjfv	free(vm, M_DEVBUF);
503270631Sjfv}
504270631Sjfv
505270631Sjfv/*
506270631Sjfv** Scan the Filter List looking for vlans that need
507270631Sjfv** to be added, then create the data to hand to the AQ
508270631Sjfv** for handling.
509270631Sjfv*/
510270631Sjfvvoid
511270631Sjfvixlv_add_vlans(struct ixlv_sc *sc)
512270631Sjfv{
513270631Sjfv	struct i40e_virtchnl_vlan_filter_list	*v;
514270631Sjfv	struct ixlv_vlan_filter *f, *ftmp;
515270631Sjfv	device_t	dev = sc->dev;
516270631Sjfv	int		len, i = 0, cnt = 0;
517270631Sjfv
518270631Sjfv	/* Get count of VLAN filters to add */
519270631Sjfv	SLIST_FOREACH(f, sc->vlan_filters, next) {
520270631Sjfv		if (f->flags & IXL_FILTER_ADD)
521270631Sjfv			cnt++;
522270631Sjfv	}
523270631Sjfv
524270631Sjfv	if (!cnt) {  /* no work... */
525274360Sjfv		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
526274360Sjfv		    I40E_SUCCESS);
527270631Sjfv		return;
528270631Sjfv	}
529270631Sjfv
530270631Sjfv	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
531270631Sjfv	      (cnt * sizeof(u16));
532270631Sjfv
533270631Sjfv	if (len > IXL_AQ_BUF_SZ) {
534270631Sjfv		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
535270631Sjfv			__func__);
536274360Sjfv		ixl_vc_schedule_retry(&sc->vc_mgr);
537270631Sjfv		return;
538270631Sjfv	}
539270631Sjfv
540270631Sjfv	v = malloc(len, M_DEVBUF, M_NOWAIT);
541270631Sjfv	if (!v) {
542270631Sjfv		device_printf(dev, "%s: unable to allocate memory\n",
543270631Sjfv			__func__);
544274360Sjfv		ixl_vc_schedule_retry(&sc->vc_mgr);
545270631Sjfv		return;
546270631Sjfv	}
547270631Sjfv
548270631Sjfv	v->vsi_id = sc->vsi_res->vsi_id;
549270631Sjfv	v->num_elements = cnt;
550270631Sjfv
551270631Sjfv	/* Scan the filter array */
552270631Sjfv	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
553270631Sjfv                if (f->flags & IXL_FILTER_ADD) {
554270631Sjfv                        bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
555270631Sjfv			f->flags = IXL_FILTER_USED;
556270631Sjfv                        i++;
557270631Sjfv                }
558270631Sjfv                if (i == cnt)
559270631Sjfv                        break;
560270631Sjfv	}
561274360Sjfv	// ERJ: Should this be taken out?
562274360Sjfv 	if (i == 0) { /* Should not happen... */
563274360Sjfv		device_printf(dev, "%s: i == 0?\n", __func__);
564274360Sjfv		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
565274360Sjfv		    I40E_SUCCESS);
566274360Sjfv		return;
567274360Sjfv 	}
568270631Sjfv
569270631Sjfv	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
570270631Sjfv	free(v, M_DEVBUF);
571270631Sjfv	/* add stats? */
572270631Sjfv}
573270631Sjfv
574270631Sjfv/*
575270631Sjfv** Scan the Filter Table looking for vlans that need
576270631Sjfv** to be removed, then create the data to hand to the AQ
577270631Sjfv** for handling.
578270631Sjfv*/
579270631Sjfvvoid
580270631Sjfvixlv_del_vlans(struct ixlv_sc *sc)
581270631Sjfv{
582270631Sjfv	device_t	dev = sc->dev;
583270631Sjfv	struct i40e_virtchnl_vlan_filter_list *v;
584270631Sjfv	struct ixlv_vlan_filter *f, *ftmp;
585270631Sjfv	int len, i = 0, cnt = 0;
586270631Sjfv
587270631Sjfv	/* Get count of VLAN filters to delete */
588270631Sjfv	SLIST_FOREACH(f, sc->vlan_filters, next) {
589270631Sjfv		if (f->flags & IXL_FILTER_DEL)
590270631Sjfv			cnt++;
591270631Sjfv	}
592270631Sjfv
593270631Sjfv	if (!cnt) {  /* no work... */
594274360Sjfv		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
595274360Sjfv		    I40E_SUCCESS);
596270631Sjfv		return;
597270631Sjfv	}
598270631Sjfv
599270631Sjfv	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
600270631Sjfv	      (cnt * sizeof(u16));
601270631Sjfv
602270631Sjfv	if (len > IXL_AQ_BUF_SZ) {
603270631Sjfv		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
604270631Sjfv			__func__);
605274360Sjfv		ixl_vc_schedule_retry(&sc->vc_mgr);
606270631Sjfv		return;
607270631Sjfv	}
608270631Sjfv
609270631Sjfv	v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
610270631Sjfv	if (!v) {
611270631Sjfv		device_printf(dev, "%s: unable to allocate memory\n",
612270631Sjfv			__func__);
613274360Sjfv		ixl_vc_schedule_retry(&sc->vc_mgr);
614270631Sjfv		return;
615270631Sjfv	}
616270631Sjfv
617270631Sjfv	v->vsi_id = sc->vsi_res->vsi_id;
618270631Sjfv	v->num_elements = cnt;
619270631Sjfv
620270631Sjfv	/* Scan the filter array */
621270631Sjfv	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
622270631Sjfv                if (f->flags & IXL_FILTER_DEL) {
623270631Sjfv                        bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
624270631Sjfv                        i++;
625270631Sjfv                        SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next);
626270631Sjfv                        free(f, M_DEVBUF);
627270631Sjfv                }
628270631Sjfv                if (i == cnt)
629270631Sjfv                        break;
630270631Sjfv	}
631274360Sjfv	// ERJ: Take this out?
632274360Sjfv 	if (i == 0) { /* Should not happen... */
633274360Sjfv		device_printf(dev, "%s: i == 0?\n", __func__);
634274360Sjfv		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
635274360Sjfv		    I40E_SUCCESS);
636274360Sjfv		return;
637274360Sjfv 	}
638270631Sjfv
639270631Sjfv	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
640270631Sjfv	free(v, M_DEVBUF);
641270631Sjfv	/* add stats? */
642270631Sjfv}
643270631Sjfv
644270631Sjfv
645270631Sjfv/*
646270631Sjfv** This routine takes additions to the vsi filter
647270631Sjfv** table and creates an Admin Queue call to create
648270631Sjfv** the filters in the hardware.
649270631Sjfv*/
650270631Sjfvvoid
651270631Sjfvixlv_add_ether_filters(struct ixlv_sc *sc)
652270631Sjfv{
653270631Sjfv	struct i40e_virtchnl_ether_addr_list *a;
654270631Sjfv	struct ixlv_mac_filter	*f;
655270631Sjfv	device_t			dev = sc->dev;
656270631Sjfv	int				len, j = 0, cnt = 0;
657270631Sjfv
658270631Sjfv	/* Get count of MAC addresses to add */
659270631Sjfv	SLIST_FOREACH(f, sc->mac_filters, next) {
660270631Sjfv		if (f->flags & IXL_FILTER_ADD)
661270631Sjfv			cnt++;
662270631Sjfv	}
663270631Sjfv	if (cnt == 0) { /* Should not happen... */
664270631Sjfv		DDPRINTF(dev, "cnt == 0, exiting...");
665274360Sjfv		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
666274360Sjfv		    I40E_SUCCESS);
667270631Sjfv		return;
668270631Sjfv	}
669270631Sjfv
670270631Sjfv	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
671270631Sjfv	    (cnt * sizeof(struct i40e_virtchnl_ether_addr));
672270631Sjfv
673270631Sjfv	a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
674270631Sjfv	if (a == NULL) {
675270631Sjfv		device_printf(dev, "%s: Failed to get memory for "
676270631Sjfv		    "virtchnl_ether_addr_list\n", __func__);
677274360Sjfv		ixl_vc_schedule_retry(&sc->vc_mgr);
678270631Sjfv		return;
679270631Sjfv	}
680270631Sjfv	a->vsi_id = sc->vsi.id;
681270631Sjfv	a->num_elements = cnt;
682270631Sjfv
683270631Sjfv	/* Scan the filter array */
684270631Sjfv	SLIST_FOREACH(f, sc->mac_filters, next) {
685270631Sjfv		if (f->flags & IXL_FILTER_ADD) {
686270631Sjfv			bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
687270631Sjfv			f->flags &= ~IXL_FILTER_ADD;
688270631Sjfv			j++;
689270631Sjfv
690270631Sjfv			DDPRINTF(dev, "ADD: " MAC_FORMAT,
691270631Sjfv			    MAC_FORMAT_ARGS(f->macaddr));
692270631Sjfv		}
693270631Sjfv		if (j == cnt)
694270631Sjfv			break;
695270631Sjfv	}
696270631Sjfv	DDPRINTF(dev, "len %d, j %d, cnt %d",
697270631Sjfv	    len, j, cnt);
698270631Sjfv	ixlv_send_pf_msg(sc,
699270631Sjfv	    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
700270631Sjfv	/* add stats? */
701270631Sjfv	free(a, M_DEVBUF);
702270631Sjfv	return;
703270631Sjfv}
704270631Sjfv
/*
** This routine takes filters flagged for deletion in the
** sc MAC filter list and creates an Admin Queue call
** to delete those filters in the hardware.
*/
void
ixlv_del_ether_filters(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_ether_addr_list *d;
	device_t			dev = sc->dev;
	struct ixlv_mac_filter	*f, *f_temp;
	int				len, j = 0, cnt = 0;

	/* Get count of MAC addresses to delete */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_DEL)
			cnt++;
	}
	if (cnt == 0) {
		/* Nothing flagged; report success to the request manager */
		DDPRINTF(dev, "cnt == 0, exiting...");
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
		    I40E_SUCCESS);
		return;
	}

	/* Header plus one ether_addr entry per filter */
	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	    (cnt * sizeof(struct i40e_virtchnl_ether_addr));

	d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: Failed to get memory for "
		    "virtchnl_ether_addr_list\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}
	d->vsi_id = sc->vsi.id;
	d->num_elements = cnt;

	/* Scan the filter array; SAFE variant because matched entries are
	 * unlinked and freed as they are copied into the message. */
	SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
		if (f->flags & IXL_FILTER_DEL) {
			bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
			DDPRINTF(dev, "DEL: " MAC_FORMAT,
			    MAC_FORMAT_ARGS(f->macaddr));
			j++;
			SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next);
			free(f, M_DEVBUF);
		}
		if (j == cnt)
			break;
	}
	ixlv_send_pf_msg(sc,
	    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
	/* add stats? */
	free(d, M_DEVBUF);
	return;
}
762270631Sjfv
763270631Sjfv/*
764270631Sjfv** ixlv_request_reset
765270631Sjfv** Request that the PF reset this VF. No response is expected.
766270631Sjfv*/
767270631Sjfvvoid
768270631Sjfvixlv_request_reset(struct ixlv_sc *sc)
769270631Sjfv{
770270631Sjfv	/*
771270631Sjfv	** Set the reset status to "in progress" before
772270631Sjfv	** the request, this avoids any possibility of
773270631Sjfv	** a mistaken early detection of completion.
774270631Sjfv	*/
775270631Sjfv	wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
776270631Sjfv	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
777270631Sjfv}
778270631Sjfv
779270631Sjfv/*
780270631Sjfv** ixlv_request_stats
781270631Sjfv** Request the statistics for this VF's VSI from PF.
782270631Sjfv*/
783270631Sjfvvoid
784270631Sjfvixlv_request_stats(struct ixlv_sc *sc)
785270631Sjfv{
786270631Sjfv	struct i40e_virtchnl_queue_select vqs;
787292095Ssmh	int error = 0;
788270631Sjfv
789270631Sjfv	vqs.vsi_id = sc->vsi_res->vsi_id;
790274360Sjfv	/* Low priority, we don't need to error check */
791292095Ssmh	error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
792270631Sjfv	    (u8 *)&vqs, sizeof(vqs));
793292095Ssmh#ifdef IXL_DEBUG
794292095Ssmh	if (error)
795292095Ssmh		device_printf(sc->dev, "Error sending stats request to PF: %d\n", error);
796292095Ssmh#endif
797270631Sjfv}
798270631Sjfv
799270631Sjfv/*
800270631Sjfv** Updates driver's stats counters with VSI stats returned from PF.
801270631Sjfv*/
802270631Sjfvvoid
803270631Sjfvixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
804270631Sjfv{
805292097Ssmh	struct ixl_vsi *vsi = &sc->vsi;
806274360Sjfv	uint64_t tx_discards;
807270631Sjfv
808274360Sjfv	tx_discards = es->tx_discards;
809292097Ssmh	for (int i = 0; i < vsi->num_queues; i++)
810274360Sjfv		tx_discards += sc->vsi.queues[i].txr.br->br_drops;
811274360Sjfv
812274360Sjfv	/* Update ifnet stats */
813274360Sjfv	IXL_SET_IPACKETS(vsi, es->rx_unicast +
814270631Sjfv	                   es->rx_multicast +
815274360Sjfv			   es->rx_broadcast);
816274360Sjfv	IXL_SET_OPACKETS(vsi, es->tx_unicast +
817270631Sjfv	                   es->tx_multicast +
818274360Sjfv			   es->tx_broadcast);
819274360Sjfv	IXL_SET_IBYTES(vsi, es->rx_bytes);
820274360Sjfv	IXL_SET_OBYTES(vsi, es->tx_bytes);
821274360Sjfv	IXL_SET_IMCASTS(vsi, es->rx_multicast);
822274360Sjfv	IXL_SET_OMCASTS(vsi, es->tx_multicast);
823270631Sjfv
824274360Sjfv	IXL_SET_OERRORS(vsi, es->tx_errors);
825274360Sjfv	IXL_SET_IQDROPS(vsi, es->rx_discards);
826274360Sjfv	IXL_SET_OQDROPS(vsi, tx_discards);
827274360Sjfv	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
828274360Sjfv	IXL_SET_COLLISIONS(vsi, 0);
829270631Sjfv
830292097Ssmh	vsi->eth_stats = *es;
831270631Sjfv}
832270631Sjfv
/*
** ixlv_vc_completion
**
** Asynchronous completion function for admin queue messages. Rather than busy
** wait, we fire off our requests and assume that no errors will be returned.
** This function handles the reply messages.
*/
void
ixlv_vc_completion(struct ixlv_sc *sc,
    enum i40e_virtchnl_ops v_opcode,
    i40e_status v_retval, u8 *msg, u16 msglen)
{
	device_t	dev = sc->dev;
	struct ixl_vsi	*vsi = &sc->vsi;

	/*
	** Unsolicited PF-to-VF notifications arrive as OP_EVENT rather
	** than as replies to a queued request; handle them first.
	*/
	if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
		struct i40e_virtchnl_pf_event *vpe =
			(struct i40e_virtchnl_pf_event *)msg;

		switch (vpe->event) {
		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
#ifdef IXL_DEBUG
			device_printf(dev, "Link change: status %d, speed %d\n",
			    vpe->event_data.link_event.link_status,
			    vpe->event_data.link_event.link_speed);
#endif
			/* Cache the new link state, then notify the stack. */
			sc->link_up =
				vpe->event_data.link_event.link_status;
			sc->link_speed =
				vpe->event_data.link_event.link_speed;
			ixlv_update_link_status(sc);
			break;
		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
			/* PF is about to reset this VF; reinitialize. */
			device_printf(dev, "PF initiated reset!\n");
			sc->init_state = IXLV_RESET_PENDING;
			ixlv_init(sc);
			break;
		default:
			device_printf(dev, "%s: Unknown event %d from AQ\n",
				__func__, vpe->event);
			break;
		}

		return;
	}

	/* Catch-all error response */
	if (v_retval) {
		device_printf(dev,
		    "%s: AQ returned error %d to our request %d!\n",
		    __func__, v_retval, v_opcode);
	}

#ifdef IXL_DEBUG
	if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
		DDPRINTF(dev, "opcode %d", v_opcode);
#endif

	/*
	** Replies to requests we queued earlier: hand the result to the
	** command manager via ixl_vc_process_resp() so the matching
	** in-flight command is retired and the next one can be sent.
	*/
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_GET_STATS:
		ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
		    v_retval);
		if (v_retval) {
			device_printf(dev, "WARNING: Error adding VF mac filter!\n");
			device_printf(dev, "WARNING: Device may not receive traffic!\n");
		}
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES,
		    v_retval);
		if (v_retval == 0) {
			/* Update link status */
			ixlv_update_link_status(sc);
			/* Turn on all interrupts */
			ixlv_enable_intr(vsi);
			/* And inform the stack we're ready */
			vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
			vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES,
		    v_retval);
		if (v_retval == 0) {
			/* Turn off all interrupts */
			ixlv_disable_intr(vsi);
			/* Tell the stack that the interface is no longer active */
			vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
		    v_retval);
		break;
	default:
		device_printf(dev,
		    "%s: Received unexpected message %d from PF.\n",
		    __func__, v_opcode);
		break;
	}
	return;
}
958274360Sjfv
959274360Sjfvstatic void
960274360Sjfvixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
961274360Sjfv{
962274360Sjfv
963274360Sjfv	switch (request) {
964274360Sjfv	case IXLV_FLAG_AQ_MAP_VECTORS:
965274360Sjfv		ixlv_map_queues(sc);
966274360Sjfv		break;
967274360Sjfv
968274360Sjfv	case IXLV_FLAG_AQ_ADD_MAC_FILTER:
969274360Sjfv		ixlv_add_ether_filters(sc);
970274360Sjfv		break;
971274360Sjfv
972274360Sjfv	case IXLV_FLAG_AQ_ADD_VLAN_FILTER:
973274360Sjfv		ixlv_add_vlans(sc);
974274360Sjfv		break;
975274360Sjfv
976274360Sjfv	case IXLV_FLAG_AQ_DEL_MAC_FILTER:
977274360Sjfv		ixlv_del_ether_filters(sc);
978274360Sjfv		break;
979274360Sjfv
980274360Sjfv	case IXLV_FLAG_AQ_DEL_VLAN_FILTER:
981274360Sjfv		ixlv_del_vlans(sc);
982274360Sjfv		break;
983274360Sjfv
984274360Sjfv	case IXLV_FLAG_AQ_CONFIGURE_QUEUES:
985274360Sjfv		ixlv_configure_queues(sc);
986274360Sjfv		break;
987274360Sjfv
988274360Sjfv	case IXLV_FLAG_AQ_DISABLE_QUEUES:
989274360Sjfv		ixlv_disable_queues(sc);
990274360Sjfv		break;
991274360Sjfv
992274360Sjfv	case IXLV_FLAG_AQ_ENABLE_QUEUES:
993274360Sjfv		ixlv_enable_queues(sc);
994274360Sjfv		break;
995274360Sjfv	}
996274360Sjfv}
997274360Sjfv
998274360Sjfvvoid
999274360Sjfvixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr)
1000274360Sjfv{
1001274360Sjfv	mgr->sc = sc;
1002274360Sjfv	mgr->current = NULL;
1003274360Sjfv	TAILQ_INIT(&mgr->pending);
1004274360Sjfv	callout_init_mtx(&mgr->callout, &sc->mtx, 0);
1005274360Sjfv}
1006274360Sjfv
1007274360Sjfvstatic void
1008274360Sjfvixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err)
1009274360Sjfv{
1010274360Sjfv	struct ixl_vc_cmd *cmd;
1011274360Sjfv
1012274360Sjfv	cmd = mgr->current;
1013274360Sjfv	mgr->current = NULL;
1014274360Sjfv	cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
1015274360Sjfv
1016274360Sjfv	cmd->callback(cmd, cmd->arg, err);
1017274360Sjfv	ixl_vc_process_next(mgr);
1018274360Sjfv}
1019274360Sjfv
1020274360Sjfvstatic void
1021274360Sjfvixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
1022274360Sjfv    enum i40e_status_code err)
1023274360Sjfv{
1024274360Sjfv	struct ixl_vc_cmd *cmd;
1025274360Sjfv
1026274360Sjfv	cmd = mgr->current;
1027274360Sjfv	if (cmd == NULL || cmd->request != request)
1028274360Sjfv		return;
1029274360Sjfv
1030274360Sjfv	callout_stop(&mgr->callout);
1031274360Sjfv	ixl_vc_process_completion(mgr, err);
1032274360Sjfv}
1033274360Sjfv
1034274360Sjfvstatic void
1035274360Sjfvixl_vc_cmd_timeout(void *arg)
1036274360Sjfv{
1037274360Sjfv	struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
1038274360Sjfv
1039274360Sjfv	IXLV_CORE_LOCK_ASSERT(mgr->sc);
1040274360Sjfv	ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT);
1041274360Sjfv}
1042274360Sjfv
1043274360Sjfvstatic void
1044274360Sjfvixl_vc_cmd_retry(void *arg)
1045274360Sjfv{
1046274360Sjfv	struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
1047274360Sjfv
1048274360Sjfv	IXLV_CORE_LOCK_ASSERT(mgr->sc);
1049274360Sjfv	ixl_vc_send_current(mgr);
1050274360Sjfv}
1051274360Sjfv
1052274360Sjfvstatic void
1053274360Sjfvixl_vc_send_current(struct ixl_vc_mgr *mgr)
1054274360Sjfv{
1055274360Sjfv	struct ixl_vc_cmd *cmd;
1056274360Sjfv
1057274360Sjfv	cmd = mgr->current;
1058274360Sjfv	ixl_vc_send_cmd(mgr->sc, cmd->request);
1059274360Sjfv	callout_reset(&mgr->callout, IXLV_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr);
1060274360Sjfv}
1061274360Sjfv
1062274360Sjfvstatic void
1063274360Sjfvixl_vc_process_next(struct ixl_vc_mgr *mgr)
1064274360Sjfv{
1065274360Sjfv	struct ixl_vc_cmd *cmd;
1066274360Sjfv
1067274360Sjfv	if (mgr->current != NULL)
1068274360Sjfv		return;
1069274360Sjfv
1070274360Sjfv	if (TAILQ_EMPTY(&mgr->pending))
1071274360Sjfv		return;
1072274360Sjfv
1073274360Sjfv	cmd = TAILQ_FIRST(&mgr->pending);
1074274360Sjfv	TAILQ_REMOVE(&mgr->pending, cmd, next);
1075274360Sjfv
1076274360Sjfv	mgr->current = cmd;
1077274360Sjfv	ixl_vc_send_current(mgr);
1078274360Sjfv}
1079274360Sjfv
1080274360Sjfvstatic void
1081274360Sjfvixl_vc_schedule_retry(struct ixl_vc_mgr *mgr)
1082274360Sjfv{
1083274360Sjfv
1084274360Sjfv	callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr);
1085274360Sjfv}
1086274360Sjfv
1087274360Sjfvvoid
1088274360Sjfvixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
1089274360Sjfv	    uint32_t req, ixl_vc_callback_t *callback, void *arg)
1090274360Sjfv{
1091274360Sjfv	IXLV_CORE_LOCK_ASSERT(mgr->sc);
1092274360Sjfv
1093274360Sjfv	if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
1094274360Sjfv		if (mgr->current == cmd)
1095274360Sjfv			mgr->current = NULL;
1096274360Sjfv		else
1097274360Sjfv			TAILQ_REMOVE(&mgr->pending, cmd, next);
1098274360Sjfv	}
1099274360Sjfv
1100274360Sjfv	cmd->request = req;
1101274360Sjfv	cmd->callback = callback;
1102274360Sjfv	cmd->arg = arg;
1103274360Sjfv	cmd->flags |= IXLV_VC_CMD_FLAG_BUSY;
1104274360Sjfv	TAILQ_INSERT_TAIL(&mgr->pending, cmd, next);
1105274360Sjfv
1106274360Sjfv	ixl_vc_process_next(mgr);
1107274360Sjfv}
1108274360Sjfv
1109274360Sjfvvoid
1110274360Sjfvixl_vc_flush(struct ixl_vc_mgr *mgr)
1111274360Sjfv{
1112274360Sjfv	struct ixl_vc_cmd *cmd;
1113274360Sjfv
1114274360Sjfv	IXLV_CORE_LOCK_ASSERT(mgr->sc);
1115274360Sjfv	KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL,
1116274360Sjfv	    ("ixlv: pending commands waiting but no command in progress"));
1117274360Sjfv
1118274360Sjfv	cmd = mgr->current;
1119274360Sjfv	if (cmd != NULL) {
1120274360Sjfv		mgr->current = NULL;
1121274360Sjfv		cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
1122274360Sjfv		cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
1123274360Sjfv	}
1124274360Sjfv
1125274360Sjfv	while ((cmd = TAILQ_FIRST(&mgr->pending)) != NULL) {
1126274360Sjfv		TAILQ_REMOVE(&mgr->pending, cmd, next);
1127274360Sjfv		cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
1128274360Sjfv		cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
1129274360Sjfv	}
1130274360Sjfv
1131274360Sjfv	callout_stop(&mgr->callout);
1132274360Sjfv}
1133274360Sjfv
1134