1303816Ssbruno/******************************************************************************
2303816Ssbruno
3303816Ssbruno  Copyright (c) 2013-2015, Intel Corporation
4303816Ssbruno  All rights reserved.
5303816Ssbruno
6303816Ssbruno  Redistribution and use in source and binary forms, with or without
7303816Ssbruno  modification, are permitted provided that the following conditions are met:
8303816Ssbruno
9303816Ssbruno   1. Redistributions of source code must retain the above copyright notice,
10303816Ssbruno      this list of conditions and the following disclaimer.
11303816Ssbruno
12303816Ssbruno   2. Redistributions in binary form must reproduce the above copyright
13303816Ssbruno      notice, this list of conditions and the following disclaimer in the
14303816Ssbruno      documentation and/or other materials provided with the distribution.
15303816Ssbruno
16303816Ssbruno   3. Neither the name of the Intel Corporation nor the names of its
17303816Ssbruno      contributors may be used to endorse or promote products derived from
18303816Ssbruno      this software without specific prior written permission.
19303816Ssbruno
20303816Ssbruno  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21303816Ssbruno  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22303816Ssbruno  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23303816Ssbruno  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24303816Ssbruno  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25303816Ssbruno  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26303816Ssbruno  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27303816Ssbruno  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28303816Ssbruno  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29303816Ssbruno  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30303816Ssbruno  POSSIBILITY OF SUCH DAMAGE.
31303816Ssbruno
32303816Ssbruno******************************************************************************/
33303816Ssbruno/*$FreeBSD: releng/11.0/sys/dev/ixl/ixl_pf_iov.c 303967 2016-08-11 19:13:30Z sbruno $*/
34303816Ssbruno
35303816Ssbruno#include "ixl_pf_iov.h"
36303816Ssbruno
37303816Ssbruno/* Private functions */
38303816Ssbrunostatic void	ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
39303816Ssbrunostatic void	ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
40303816Ssbrunostatic void	ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
41303816Ssbruno
42303816Ssbrunostatic bool	ixl_zero_mac(const uint8_t *addr);
43303816Ssbrunostatic bool	ixl_bcast_mac(const uint8_t *addr);
44303816Ssbruno
45303816Ssbrunostatic const char *	ixl_vc_opcode_str(uint16_t op);
46303816Ssbrunostatic int	ixl_vc_opcode_level(uint16_t opcode);
47303816Ssbruno
48303816Ssbrunostatic int	ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
49303816Ssbruno
50303816Ssbrunostatic int	ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
51303816Ssbrunostatic int	ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
52303816Ssbrunostatic void	ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
53303816Ssbrunostatic void	ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
54303816Ssbrunostatic void	ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
55303816Ssbrunostatic int	ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
56303816Ssbrunostatic void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
57303816Ssbrunostatic void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
58303816Ssbrunostatic void	ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
59303816Ssbrunostatic void	ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
60303816Ssbrunostatic void	ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
61303816Ssbrunostatic void	ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
62303816Ssbrunostatic void	ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
63303816Ssbrunostatic void	ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
64303816Ssbrunostatic int	ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_txq_info *info);
65303816Ssbrunostatic int	ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_rxq_info *info);
66303816Ssbrunostatic void	ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
67303816Ssbrunostatic void	ixl_vf_set_qctl(struct ixl_pf *pf, const struct i40e_virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
68303816Ssbruno    enum i40e_queue_type *last_type, uint16_t *last_queue);
69303816Ssbrunostatic void	ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct i40e_virtchnl_vector_map *vector);
70303816Ssbrunostatic void	ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
71303816Ssbrunostatic void	ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
72303816Ssbrunostatic void	ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
73303816Ssbrunostatic void	ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
74303816Ssbrunostatic void	ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
75303816Ssbrunostatic enum i40e_status_code	ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
76303816Ssbrunostatic void	ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
77303816Ssbrunostatic void	ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
78303816Ssbrunostatic void	ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
79303816Ssbrunostatic void	ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
80303816Ssbrunostatic int	ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
81303816Ssbruno
82303816Ssbrunostatic int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
83303816Ssbruno
84303816Ssbrunovoid
85303816Ssbrunoixl_initialize_sriov(struct ixl_pf *pf)
86303816Ssbruno{
87303816Ssbruno	device_t dev = pf->dev;
88303816Ssbruno	struct i40e_hw *hw = &pf->hw;
89303816Ssbruno	nvlist_t	*pf_schema, *vf_schema;
90303816Ssbruno	int		iov_error;
91303816Ssbruno
92303816Ssbruno	/* SR-IOV is only supported when MSI-X is in use. */
93303816Ssbruno	if (pf->msix <= 1)
94303816Ssbruno		return;
95303816Ssbruno
96303816Ssbruno	pf_schema = pci_iov_schema_alloc_node();
97303816Ssbruno	vf_schema = pci_iov_schema_alloc_node();
98303816Ssbruno	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
99303816Ssbruno	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
100303816Ssbruno	    IOV_SCHEMA_HASDEFAULT, TRUE);
101303816Ssbruno	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
102303816Ssbruno	    IOV_SCHEMA_HASDEFAULT, FALSE);
103303816Ssbruno	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
104303816Ssbruno	    IOV_SCHEMA_HASDEFAULT, FALSE);
105303816Ssbruno	pci_iov_schema_add_uint16(vf_schema, "num-queues",
106303816Ssbruno	    IOV_SCHEMA_HASDEFAULT,
107303816Ssbruno	    max(1, hw->func_caps.num_msix_vectors_vf - 1) % IXLV_MAX_QUEUES);
108303816Ssbruno
109303816Ssbruno	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
110303816Ssbruno	if (iov_error != 0) {
111303816Ssbruno		device_printf(dev,
112303816Ssbruno		    "Failed to initialize SR-IOV (error=%d)\n",
113303816Ssbruno		    iov_error);
114303816Ssbruno	} else
115303816Ssbruno		device_printf(dev, "SR-IOV ready\n");
116303816Ssbruno
117303816Ssbruno	pf->vc_debug_lvl = 1;
118303816Ssbruno}
119303816Ssbruno
120303816Ssbruno/*
121303816Ssbruno * Allocate the VSI for a VF.
122303816Ssbruno */
123303816Ssbrunostatic int
124303816Ssbrunoixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
125303816Ssbruno{
126303816Ssbruno	device_t dev;
127303816Ssbruno	struct i40e_hw *hw;
128303816Ssbruno	struct ixl_vsi *vsi;
129303816Ssbruno	struct i40e_vsi_context vsi_ctx;
130303816Ssbruno	int i;
131303816Ssbruno	enum i40e_status_code code;
132303816Ssbruno
133303816Ssbruno	hw = &pf->hw;
134303816Ssbruno	vsi = &pf->vsi;
135303816Ssbruno	dev = pf->dev;
136303816Ssbruno
137303816Ssbruno	vsi_ctx.pf_num = hw->pf_id;
138303816Ssbruno	vsi_ctx.uplink_seid = pf->veb_seid;
139303816Ssbruno	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
140303816Ssbruno	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
141303816Ssbruno	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
142303816Ssbruno
143303816Ssbruno	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
144303816Ssbruno
145303816Ssbruno	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
146303816Ssbruno	vsi_ctx.info.switch_id = htole16(0);
147303816Ssbruno
148303816Ssbruno	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
149303816Ssbruno	vsi_ctx.info.sec_flags = 0;
150303816Ssbruno	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
151303816Ssbruno		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
152303816Ssbruno
153303816Ssbruno	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
154303816Ssbruno	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
155303816Ssbruno	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
156303816Ssbruno
157303816Ssbruno	vsi_ctx.info.valid_sections |=
158303816Ssbruno	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
159303816Ssbruno	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
160303816Ssbruno
161303816Ssbruno	/* ERJ: Only scattered allocation is supported for VFs right now */
162303816Ssbruno	for (i = 0; i < vf->qtag.num_active; i++)
163303816Ssbruno		vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
164303816Ssbruno	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
165303816Ssbruno		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
166303816Ssbruno
167303816Ssbruno	vsi_ctx.info.tc_mapping[0] = htole16(
168303816Ssbruno	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
169303816Ssbruno	    (bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
170303816Ssbruno
171303816Ssbruno	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
172303816Ssbruno	if (code != I40E_SUCCESS)
173303816Ssbruno		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
174303816Ssbruno	vf->vsi.seid = vsi_ctx.seid;
175303816Ssbruno	vf->vsi.vsi_num = vsi_ctx.vsi_number;
176303816Ssbruno	// vf->vsi.first_queue = vf->qtag.qidx[0];
177303816Ssbruno	vf->vsi.num_queues = vf->qtag.num_active;
178303816Ssbruno
179303816Ssbruno	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
180303816Ssbruno	if (code != I40E_SUCCESS)
181303816Ssbruno		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
182303816Ssbruno
183303816Ssbruno	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
184303816Ssbruno	if (code != I40E_SUCCESS) {
185303816Ssbruno		device_printf(dev, "Failed to disable BW limit: %d\n",
186303816Ssbruno		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
187303816Ssbruno		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
188303816Ssbruno	}
189303816Ssbruno
190303816Ssbruno	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
191303816Ssbruno	return (0);
192303816Ssbruno}
193303816Ssbruno
194303816Ssbrunostatic int
195303816Ssbrunoixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
196303816Ssbruno{
197303816Ssbruno	struct i40e_hw *hw;
198303816Ssbruno	int error;
199303816Ssbruno
200303816Ssbruno	hw = &pf->hw;
201303816Ssbruno
202303816Ssbruno	error = ixl_vf_alloc_vsi(pf, vf);
203303816Ssbruno	if (error != 0)
204303816Ssbruno		return (error);
205303816Ssbruno
206303816Ssbruno	vf->vsi.hw_filters_add = 0;
207303816Ssbruno	vf->vsi.hw_filters_del = 0;
208303816Ssbruno	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
209303816Ssbruno	ixl_reconfigure_filters(&vf->vsi);
210303816Ssbruno
211303816Ssbruno	return (0);
212303816Ssbruno}
213303816Ssbruno
214303816Ssbrunostatic void
215303816Ssbrunoixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
216303816Ssbruno    uint32_t val)
217303816Ssbruno{
218303816Ssbruno	uint32_t qtable;
219303816Ssbruno	int index, shift;
220303816Ssbruno
221303816Ssbruno	/*
222303816Ssbruno	 * Two queues are mapped in a single register, so we have to do some
223303816Ssbruno	 * gymnastics to convert the queue number into a register index and
224303816Ssbruno	 * shift.
225303816Ssbruno	 */
226303816Ssbruno	index = qnum / 2;
227303816Ssbruno	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
228303816Ssbruno
229303816Ssbruno	qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
230303816Ssbruno	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
231303816Ssbruno	qtable |= val << shift;
232303816Ssbruno	i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
233303816Ssbruno}
234303816Ssbruno
235303816Ssbrunostatic void
236303816Ssbrunoixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
237303816Ssbruno{
238303816Ssbruno	struct i40e_hw *hw;
239303816Ssbruno	uint32_t qtable;
240303816Ssbruno	int i;
241303816Ssbruno
242303816Ssbruno	hw = &pf->hw;
243303816Ssbruno
244303816Ssbruno	/*
245303816Ssbruno	 * Contiguous mappings aren't actually supported by the hardware,
246303816Ssbruno	 * so we have to use non-contiguous mappings.
247303816Ssbruno	 */
248303816Ssbruno	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
249303816Ssbruno	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
250303816Ssbruno
251303816Ssbruno	/* Enable LAN traffic on this VF */
252303816Ssbruno	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
253303816Ssbruno	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
254303816Ssbruno
255303816Ssbruno	/* Program index of each VF queue into PF queue space
256303816Ssbruno	 * (This is only needed if QTABLE is enabled) */
257303816Ssbruno	for (i = 0; i < vf->vsi.num_queues; i++) {
258303816Ssbruno		qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
259303816Ssbruno		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
260303816Ssbruno
261303816Ssbruno		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
262303816Ssbruno	}
263303816Ssbruno	for (; i < IXL_MAX_VSI_QUEUES; i++)
264303816Ssbruno		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
265303816Ssbruno		    I40E_VPLAN_QTABLE_QINDEX_MASK);
266303816Ssbruno
267303816Ssbruno	/* Map queues allocated to VF to its VSI;
268303816Ssbruno	 * This mapping matches the VF-wide mapping since the VF
269303816Ssbruno	 * is only given a single VSI */
270303816Ssbruno	for (i = 0; i < vf->vsi.num_queues; i++)
271303816Ssbruno		ixl_vf_map_vsi_queue(hw, vf, i,
272303816Ssbruno		    ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
273303816Ssbruno
274303816Ssbruno	/* Set rest of VSI queues as unused. */
275303816Ssbruno	for (; i < IXL_MAX_VSI_QUEUES; i++)
276303816Ssbruno		ixl_vf_map_vsi_queue(hw, vf, i,
277303816Ssbruno		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
278303816Ssbruno
279303816Ssbruno	ixl_flush(hw);
280303816Ssbruno}
281303816Ssbruno
282303816Ssbrunostatic void
283303816Ssbrunoixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
284303816Ssbruno{
285303816Ssbruno	struct i40e_hw *hw;
286303816Ssbruno
287303816Ssbruno	hw = &pf->hw;
288303816Ssbruno
289303816Ssbruno	if (vsi->seid == 0)
290303816Ssbruno		return;
291303816Ssbruno
292303816Ssbruno	i40e_aq_delete_element(hw, vsi->seid, NULL);
293303816Ssbruno}
294303816Ssbruno
/*
 * Disable one VF queue interrupt: write its VFINT_DYN_CTL(N) register
 * with only CLEARPBA set, leaving the interrupt-enable bit clear, and
 * flush the write so it takes effect immediately.
 */
static void
ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
{

	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
	ixl_flush(hw);
}
302303816Ssbruno
/*
 * Unhook a VF interrupt vector: write all-ones to the first-queue
 * index/type fields of its VPINT_LNKLST(N) register (the all-ones
 * index is the hardware's empty-list sentinel) and flush the write.
 */
static void
ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
{

	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
	ixl_flush(hw);
}
311303816Ssbruno
312303816Ssbrunostatic void
313303816Ssbrunoixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
314303816Ssbruno{
315303816Ssbruno	struct i40e_hw *hw;
316303816Ssbruno	uint32_t vfint_reg, vpint_reg;
317303816Ssbruno	int i;
318303816Ssbruno
319303816Ssbruno	hw = &pf->hw;
320303816Ssbruno
321303816Ssbruno	ixl_vf_vsi_release(pf, &vf->vsi);
322303816Ssbruno
323303816Ssbruno	/* Index 0 has a special register. */
324303816Ssbruno	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
325303816Ssbruno
326303816Ssbruno	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
327303816Ssbruno		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
328303816Ssbruno		ixl_vf_disable_queue_intr(hw, vfint_reg);
329303816Ssbruno	}
330303816Ssbruno
331303816Ssbruno	/* Index 0 has a special register. */
332303816Ssbruno	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
333303816Ssbruno
334303816Ssbruno	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
335303816Ssbruno		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
336303816Ssbruno		ixl_vf_unregister_intr(hw, vpint_reg);
337303816Ssbruno	}
338303816Ssbruno
339303816Ssbruno	vf->vsi.num_queues = 0;
340303816Ssbruno}
341303816Ssbruno
342303816Ssbrunostatic int
343303816Ssbrunoixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
344303816Ssbruno{
345303816Ssbruno	struct i40e_hw *hw;
346303816Ssbruno	int i;
347303816Ssbruno	uint16_t global_vf_num;
348303816Ssbruno	uint32_t ciad;
349303816Ssbruno
350303816Ssbruno	hw = &pf->hw;
351303816Ssbruno	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
352303816Ssbruno
353303816Ssbruno	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
354303816Ssbruno	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
355303816Ssbruno	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
356303816Ssbruno		ciad = rd32(hw, I40E_PF_PCI_CIAD);
357303816Ssbruno		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
358303816Ssbruno			return (0);
359303816Ssbruno		DELAY(1);
360303816Ssbruno	}
361303816Ssbruno
362303816Ssbruno	return (ETIMEDOUT);
363303816Ssbruno}
364303816Ssbruno
365303816Ssbrunostatic void
366303816Ssbrunoixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
367303816Ssbruno{
368303816Ssbruno	struct i40e_hw *hw;
369303816Ssbruno	uint32_t vfrtrig;
370303816Ssbruno
371303816Ssbruno	hw = &pf->hw;
372303816Ssbruno
373303816Ssbruno	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
374303816Ssbruno	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
375303816Ssbruno	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
376303816Ssbruno	ixl_flush(hw);
377303816Ssbruno
378303816Ssbruno	ixl_reinit_vf(pf, vf);
379303816Ssbruno}
380303816Ssbruno
/*
 * Complete a VF reset.  Called with the reset trigger already asserted
 * (see ixl_reset_vf()): drains PCIe activity, waits for the hardware
 * to report reset done, deasserts the trigger, then tears down and
 * rebuilds the VF's VSI and queue mappings before marking it active.
 */
static void
ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfrstat, vfrtrig;
	int i, error;

	hw = &pf->hw;

	/* Best effort: proceed with the reset even if PCIe never idles. */
	error = ixl_flush_pcie(pf, vf);
	if (error != 0)
		device_printf(pf->dev,
		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
		    vf->vf_num);

	/* Poll VFRSTAT for the reset-done indication. */
	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		DELAY(10);

		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i == IXL_VF_RESET_TIMEOUT)
		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);

	/* Expose "reset completed" in the VF's reset-status register. */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);

	/* Deassert the software-reset trigger set by ixl_reset_vf(). */
	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);

	/* Rebuild the VF's resources from scratch. */
	if (vf->vsi.seid != 0)
		ixl_disable_rings(&vf->vsi);

	ixl_vf_release_resources(pf, vf);
	ixl_vf_setup_vsi(pf, vf);
	ixl_vf_map_queues(pf, vf);

	/* Advertise the VF as active again. */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
	ixl_flush(hw);
}
423303816Ssbruno
424303816Ssbrunostatic const char *
425303816Ssbrunoixl_vc_opcode_str(uint16_t op)
426303816Ssbruno{
427303816Ssbruno
428303816Ssbruno	switch (op) {
429303816Ssbruno	case I40E_VIRTCHNL_OP_VERSION:
430303816Ssbruno		return ("VERSION");
431303816Ssbruno	case I40E_VIRTCHNL_OP_RESET_VF:
432303816Ssbruno		return ("RESET_VF");
433303816Ssbruno	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
434303816Ssbruno		return ("GET_VF_RESOURCES");
435303816Ssbruno	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
436303816Ssbruno		return ("CONFIG_TX_QUEUE");
437303816Ssbruno	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
438303816Ssbruno		return ("CONFIG_RX_QUEUE");
439303816Ssbruno	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
440303816Ssbruno		return ("CONFIG_VSI_QUEUES");
441303816Ssbruno	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
442303816Ssbruno		return ("CONFIG_IRQ_MAP");
443303816Ssbruno	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
444303816Ssbruno		return ("ENABLE_QUEUES");
445303816Ssbruno	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
446303816Ssbruno		return ("DISABLE_QUEUES");
447303816Ssbruno	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
448303816Ssbruno		return ("ADD_ETHER_ADDRESS");
449303816Ssbruno	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
450303816Ssbruno		return ("DEL_ETHER_ADDRESS");
451303816Ssbruno	case I40E_VIRTCHNL_OP_ADD_VLAN:
452303816Ssbruno		return ("ADD_VLAN");
453303816Ssbruno	case I40E_VIRTCHNL_OP_DEL_VLAN:
454303816Ssbruno		return ("DEL_VLAN");
455303816Ssbruno	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
456303816Ssbruno		return ("CONFIG_PROMISCUOUS_MODE");
457303816Ssbruno	case I40E_VIRTCHNL_OP_GET_STATS:
458303816Ssbruno		return ("GET_STATS");
459303816Ssbruno	case I40E_VIRTCHNL_OP_FCOE:
460303816Ssbruno		return ("FCOE");
461303816Ssbruno	case I40E_VIRTCHNL_OP_EVENT:
462303816Ssbruno		return ("EVENT");
463303816Ssbruno	case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
464303816Ssbruno		return ("CONFIG_RSS_KEY");
465303816Ssbruno	case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
466303816Ssbruno		return ("CONFIG_RSS_LUT");
467303816Ssbruno	case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
468303816Ssbruno		return ("GET_RSS_HENA_CAPS");
469303816Ssbruno	case I40E_VIRTCHNL_OP_SET_RSS_HENA:
470303816Ssbruno		return ("SET_RSS_HENA");
471303816Ssbruno	default:
472303816Ssbruno		return ("UNKNOWN");
473303816Ssbruno	}
474303816Ssbruno}
475303816Ssbruno
476303816Ssbrunostatic int
477303816Ssbrunoixl_vc_opcode_level(uint16_t opcode)
478303816Ssbruno{
479303816Ssbruno	switch (opcode) {
480303816Ssbruno	case I40E_VIRTCHNL_OP_GET_STATS:
481303816Ssbruno		return (10);
482303816Ssbruno	default:
483303816Ssbruno		return (5);
484303816Ssbruno	}
485303816Ssbruno}
486303816Ssbruno
487303816Ssbrunostatic void
488303816Ssbrunoixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
489303816Ssbruno    enum i40e_status_code status, void *msg, uint16_t len)
490303816Ssbruno{
491303816Ssbruno	struct i40e_hw *hw;
492303816Ssbruno	int global_vf_id;
493303816Ssbruno
494303816Ssbruno	hw = &pf->hw;
495303816Ssbruno	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
496303816Ssbruno
497303816Ssbruno	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
498303816Ssbruno	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
499303816Ssbruno	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
500303816Ssbruno
501303816Ssbruno	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
502303816Ssbruno}
503303816Ssbruno
/*
 * Send a zero-length success (ACK) reply for opcode 'op' to the VF.
 */
static void
ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
{

	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
}
510303816Ssbruno
/*
 * Send a zero-length error (NACK) reply for opcode 'op' to the VF,
 * logging the failure along with the call site ('file'/'line').
 * Presumably invoked through the i40e_send_vf_nack() wrapper used
 * elsewhere in this file, which supplies __FILE__/__LINE__.
 */
static void
ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, const char *file, int line)
{

	I40E_VC_DEBUG(pf, 1,
	    "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
	    ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
	    status, vf->vf_num, file, line);
	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
}
522303816Ssbruno
523303816Ssbrunostatic void
524303816Ssbrunoixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
525303816Ssbruno    uint16_t msg_size)
526303816Ssbruno{
527303816Ssbruno	struct i40e_virtchnl_version_info reply;
528303816Ssbruno
529303816Ssbruno	if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
530303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
531303816Ssbruno		    I40E_ERR_PARAM);
532303816Ssbruno		return;
533303816Ssbruno	}
534303816Ssbruno
535303816Ssbruno	vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;
536303816Ssbruno
537303816Ssbruno	reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
538303816Ssbruno	reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
539303816Ssbruno	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
540303816Ssbruno	    sizeof(reply));
541303816Ssbruno}
542303816Ssbruno
543303816Ssbrunostatic void
544303816Ssbrunoixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
545303816Ssbruno    uint16_t msg_size)
546303816Ssbruno{
547303816Ssbruno
548303816Ssbruno	if (msg_size != 0) {
549303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
550303816Ssbruno		    I40E_ERR_PARAM);
551303816Ssbruno		return;
552303816Ssbruno	}
553303816Ssbruno
554303816Ssbruno	ixl_reset_vf(pf, vf);
555303816Ssbruno
556303816Ssbruno	/* No response to a reset message. */
557303816Ssbruno}
558303816Ssbruno
/*
 * Handle a virtchnl GET_VF_RESOURCES message: reply with the VF's
 * offload capabilities, vector count, and its single VSI's queue
 * configuration and default MAC address.
 */
static void
ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_virtchnl_vf_resource reply;

	/*
	 * API 1.0 VFs send no payload; 1.1+ VFs send a 4-byte bitmask
	 * of the offload capabilities they request.
	 */
	if ((vf->version == 0 && msg_size != 0) ||
	    (vf->version == 1 && msg_size != 4)) {
		device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
		    " for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
		    vf->version);
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
		    I40E_ERR_PARAM);
		return;
	}

	bzero(&reply, sizeof(reply));

	if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
		/* Legacy (1.0) VFs get a fixed offload set. */
		reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
					 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
					 I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
	else
		/* Force VF RSS setup by PF in 1.1+ VFs */
		reply.vf_offload_flags = *(u32 *)msg & (
					 I40E_VIRTCHNL_VF_OFFLOAD_L2 |
					 I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
					 I40E_VIRTCHNL_VF_OFFLOAD_VLAN);

	/* Each VF is backed by exactly one VSI. */
	reply.num_vsis = 1;
	reply.num_queue_pairs = vf->vsi.num_queues;
	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	/* RSS key/LUT sizes in bytes — NOTE(review): assumed to match
	 * the i40e VF RSS register layout; confirm against datasheet. */
	reply.rss_key_size = 52;
	reply.rss_lut_size = 64;
	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
	reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);

	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
	    I40E_SUCCESS, &reply, sizeof(reply));
}
601303816Ssbruno
/*
 * Program the HMC transmit-queue context for one VF TX queue from the
 * parameters in a virtchnl request, and tag the queue as VF-owned in
 * QTX_CTL.  Returns 0 on success or EINVAL on any HMC failure.
 */
static int
ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_txq txq;
	uint16_t global_queue_num, global_vf_num;
	enum i40e_status_code status;
	uint32_t qtx_ctl;

	hw = &pf->hw;
	/* Translate VF-relative queue id and VF number into the PF's
	 * global queue space and the device-global VF id. */
	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	bzero(&txq, sizeof(txq));

	DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
	    vf->vf_num, global_queue_num, info->queue_id, global_vf_num);

	/* Start from a clean HMC context before writing the new one. */
	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	/* Ring base is programmed in IXL_TX_CTX_BASE_UNITS-byte units. */
	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;

	/* Optional head write-back location supplied by the VF. */
	txq.head_wb_ena = info->headwb_enabled;
	txq.head_wb_addr = info->dma_headwb_addr;
	txq.qlen = info->ring_len;
	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
	txq.rdylist_act = 0;

	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	/* Mark the queue as owned by this VF in QTX_CTL. */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
	ixl_flush(hw);

	/* Record that this VF queue's TX side is configured. */
	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);

	return (0);
}
646303816Ssbruno
/*
 * Program the HMC receive-queue context for one VF RX queue from the
 * parameters in a virtchnl request, after validating the VF-supplied
 * buffer and frame sizes.  Returns 0 on success or EINVAL on invalid
 * parameters or HMC failure.
 */
static int
ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_rxq rxq;
	uint16_t global_queue_num;
	enum i40e_status_code status;

	hw = &pf->hw;
	/* Translate the VF-relative queue id into the PF's queue space. */
	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
	bzero(&rxq, sizeof(rxq));

	DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
	    vf->vf_num, global_queue_num, info->queue_id);

	/* Reject VF-supplied sizes outside the driver's limits. */
	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
		return (EINVAL);

	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
	    info->max_pkt_size < ETHER_MIN_LEN)
		return (EINVAL);

	/* Header-split mode: validate and program the split settings. */
	if (info->splithdr_enabled) {
		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
			return (EINVAL);

		/* Only the supported split positions may be requested. */
		rxq.hsplit_0 = info->rx_split_pos &
		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		rxq.dtype = 2;
	}

	/* Start from a clean HMC context before writing the new one. */
	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	/* Ring base is programmed in IXL_RX_CTX_BASE_UNITS-byte units. */
	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
	rxq.qlen = info->ring_len;

	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	rxq.dsize = 1;
	rxq.crcstrip = 1;
	rxq.l2tsel = 1;

	rxq.rxmax = info->max_pkt_size;
	/* Enable TPH (TLP processing hints) for descriptors/data/header. */
	rxq.tphrdesc_ena = 1;
	rxq.tphwdesc_ena = 1;
	rxq.tphdata_ena = 1;
	rxq.tphhead_ena = 1;
	rxq.lrxqthresh = 2;
	rxq.prefena = 1;

	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	/* Record that this VF queue's RX side is configured. */
	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);

	return (0);
}
713303816Ssbruno
714303816Ssbrunostatic void
715303816Ssbrunoixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
716303816Ssbruno    uint16_t msg_size)
717303816Ssbruno{
718303816Ssbruno	struct i40e_virtchnl_vsi_queue_config_info *info;
719303816Ssbruno	struct i40e_virtchnl_queue_pair_info *pair;
720303816Ssbruno	uint16_t expected_msg_size;
721303816Ssbruno	int i;
722303816Ssbruno
723303816Ssbruno	if (msg_size < sizeof(*info)) {
724303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
725303816Ssbruno		    I40E_ERR_PARAM);
726303816Ssbruno		return;
727303816Ssbruno	}
728303816Ssbruno
729303816Ssbruno	info = msg;
730303816Ssbruno	if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
731303816Ssbruno		device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
732303816Ssbruno		    vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
733303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
734303816Ssbruno		    I40E_ERR_PARAM);
735303816Ssbruno		return;
736303816Ssbruno	}
737303816Ssbruno
738303816Ssbruno	expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
739303816Ssbruno	if (msg_size != expected_msg_size) {
740303816Ssbruno		device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
741303816Ssbruno		    vf->vf_num, msg_size, expected_msg_size);
742303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
743303816Ssbruno		    I40E_ERR_PARAM);
744303816Ssbruno		return;
745303816Ssbruno	}
746303816Ssbruno
747303816Ssbruno	if (info->vsi_id != vf->vsi.vsi_num) {
748303816Ssbruno		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
749303816Ssbruno		    vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
750303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
751303816Ssbruno		    I40E_ERR_PARAM);
752303816Ssbruno		return;
753303816Ssbruno	}
754303816Ssbruno
755303816Ssbruno	for (i = 0; i < info->num_queue_pairs; i++) {
756303816Ssbruno		pair = &info->qpair[i];
757303816Ssbruno
758303816Ssbruno		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
759303816Ssbruno		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
760303816Ssbruno		    pair->txq.queue_id != pair->rxq.queue_id ||
761303816Ssbruno		    pair->txq.queue_id >= vf->vsi.num_queues) {
762303816Ssbruno
763303816Ssbruno			i40e_send_vf_nack(pf, vf,
764303816Ssbruno			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
765303816Ssbruno			return;
766303816Ssbruno		}
767303816Ssbruno
768303816Ssbruno		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
769303816Ssbruno			i40e_send_vf_nack(pf, vf,
770303816Ssbruno			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
771303816Ssbruno			return;
772303816Ssbruno		}
773303816Ssbruno
774303816Ssbruno		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
775303816Ssbruno			i40e_send_vf_nack(pf, vf,
776303816Ssbruno			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
777303816Ssbruno			return;
778303816Ssbruno		}
779303816Ssbruno	}
780303816Ssbruno
781303816Ssbruno	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
782303816Ssbruno}
783303816Ssbruno
784303816Ssbrunostatic void
785303816Ssbrunoixl_vf_set_qctl(struct ixl_pf *pf,
786303816Ssbruno    const struct i40e_virtchnl_vector_map *vector,
787303816Ssbruno    enum i40e_queue_type cur_type, uint16_t cur_queue,
788303816Ssbruno    enum i40e_queue_type *last_type, uint16_t *last_queue)
789303816Ssbruno{
790303816Ssbruno	uint32_t offset, qctl;
791303816Ssbruno	uint16_t itr_indx;
792303816Ssbruno
793303816Ssbruno	if (cur_type == I40E_QUEUE_TYPE_RX) {
794303816Ssbruno		offset = I40E_QINT_RQCTL(cur_queue);
795303816Ssbruno		itr_indx = vector->rxitr_idx;
796303816Ssbruno	} else {
797303816Ssbruno		offset = I40E_QINT_TQCTL(cur_queue);
798303816Ssbruno		itr_indx = vector->txitr_idx;
799303816Ssbruno	}
800303816Ssbruno
801303816Ssbruno	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
802303816Ssbruno	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
803303816Ssbruno	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
804303816Ssbruno	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
805303816Ssbruno	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
806303816Ssbruno
807303816Ssbruno	wr32(&pf->hw, offset, qctl);
808303816Ssbruno
809303816Ssbruno	*last_type = cur_type;
810303816Ssbruno	*last_queue = cur_queue;
811303816Ssbruno}
812303816Ssbruno
813303816Ssbrunostatic void
814303816Ssbrunoixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
815303816Ssbruno    const struct i40e_virtchnl_vector_map *vector)
816303816Ssbruno{
817303816Ssbruno	struct i40e_hw *hw;
818303816Ssbruno	u_int qindex;
819303816Ssbruno	enum i40e_queue_type type, last_type;
820303816Ssbruno	uint32_t lnklst_reg;
821303816Ssbruno	uint16_t rxq_map, txq_map, cur_queue, last_queue;
822303816Ssbruno
823303816Ssbruno	hw = &pf->hw;
824303816Ssbruno
825303816Ssbruno	rxq_map = vector->rxq_map;
826303816Ssbruno	txq_map = vector->txq_map;
827303816Ssbruno
828303816Ssbruno	last_queue = IXL_END_OF_INTR_LNKLST;
829303816Ssbruno	last_type = I40E_QUEUE_TYPE_RX;
830303816Ssbruno
831303816Ssbruno	/*
832303816Ssbruno	 * The datasheet says to optimize performance, RX queues and TX queues
833303816Ssbruno	 * should be interleaved in the interrupt linked list, so we process
834303816Ssbruno	 * both at once here.
835303816Ssbruno	 */
836303816Ssbruno	while ((rxq_map != 0) || (txq_map != 0)) {
837303816Ssbruno		if (txq_map != 0) {
838303816Ssbruno			qindex = ffs(txq_map) - 1;
839303816Ssbruno			type = I40E_QUEUE_TYPE_TX;
840303816Ssbruno			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
841303816Ssbruno			ixl_vf_set_qctl(pf, vector, type, cur_queue,
842303816Ssbruno			    &last_type, &last_queue);
843303816Ssbruno			txq_map &= ~(1 << qindex);
844303816Ssbruno		}
845303816Ssbruno
846303816Ssbruno		if (rxq_map != 0) {
847303816Ssbruno			qindex = ffs(rxq_map) - 1;
848303816Ssbruno			type = I40E_QUEUE_TYPE_RX;
849303816Ssbruno			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
850303816Ssbruno			ixl_vf_set_qctl(pf, vector, type, cur_queue,
851303816Ssbruno			    &last_type, &last_queue);
852303816Ssbruno			rxq_map &= ~(1 << qindex);
853303816Ssbruno		}
854303816Ssbruno	}
855303816Ssbruno
856303816Ssbruno	if (vector->vector_id == 0)
857303816Ssbruno		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
858303816Ssbruno	else
859303816Ssbruno		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
860303816Ssbruno		    vf->vf_num);
861303816Ssbruno	wr32(hw, lnklst_reg,
862303816Ssbruno	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
863303816Ssbruno	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
864303816Ssbruno
865303816Ssbruno	ixl_flush(hw);
866303816Ssbruno}
867303816Ssbruno
868303816Ssbrunostatic void
869303816Ssbrunoixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
870303816Ssbruno    uint16_t msg_size)
871303816Ssbruno{
872303816Ssbruno	struct i40e_virtchnl_irq_map_info *map;
873303816Ssbruno	struct i40e_virtchnl_vector_map *vector;
874303816Ssbruno	struct i40e_hw *hw;
875303816Ssbruno	int i, largest_txq, largest_rxq;
876303816Ssbruno
877303816Ssbruno	hw = &pf->hw;
878303816Ssbruno
879303816Ssbruno	if (msg_size < sizeof(*map)) {
880303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
881303816Ssbruno		    I40E_ERR_PARAM);
882303816Ssbruno		return;
883303816Ssbruno	}
884303816Ssbruno
885303816Ssbruno	map = msg;
886303816Ssbruno	if (map->num_vectors == 0) {
887303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
888303816Ssbruno		    I40E_ERR_PARAM);
889303816Ssbruno		return;
890303816Ssbruno	}
891303816Ssbruno
892303816Ssbruno	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
893303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
894303816Ssbruno		    I40E_ERR_PARAM);
895303816Ssbruno		return;
896303816Ssbruno	}
897303816Ssbruno
898303816Ssbruno	for (i = 0; i < map->num_vectors; i++) {
899303816Ssbruno		vector = &map->vecmap[i];
900303816Ssbruno
901303816Ssbruno		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
902303816Ssbruno		    vector->vsi_id != vf->vsi.vsi_num) {
903303816Ssbruno			i40e_send_vf_nack(pf, vf,
904303816Ssbruno			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
905303816Ssbruno			return;
906303816Ssbruno		}
907303816Ssbruno
908303816Ssbruno		if (vector->rxq_map != 0) {
909303816Ssbruno			largest_rxq = fls(vector->rxq_map) - 1;
910303816Ssbruno			if (largest_rxq >= vf->vsi.num_queues) {
911303816Ssbruno				i40e_send_vf_nack(pf, vf,
912303816Ssbruno				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
913303816Ssbruno				    I40E_ERR_PARAM);
914303816Ssbruno				return;
915303816Ssbruno			}
916303816Ssbruno		}
917303816Ssbruno
918303816Ssbruno		if (vector->txq_map != 0) {
919303816Ssbruno			largest_txq = fls(vector->txq_map) - 1;
920303816Ssbruno			if (largest_txq >= vf->vsi.num_queues) {
921303816Ssbruno				i40e_send_vf_nack(pf, vf,
922303816Ssbruno				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
923303816Ssbruno				    I40E_ERR_PARAM);
924303816Ssbruno				return;
925303816Ssbruno			}
926303816Ssbruno		}
927303816Ssbruno
928303816Ssbruno		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
929303816Ssbruno		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
930303816Ssbruno			i40e_send_vf_nack(pf, vf,
931303816Ssbruno			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
932303816Ssbruno			    I40E_ERR_PARAM);
933303816Ssbruno			return;
934303816Ssbruno		}
935303816Ssbruno
936303816Ssbruno		ixl_vf_config_vector(pf, vf, vector);
937303816Ssbruno	}
938303816Ssbruno
939303816Ssbruno	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
940303816Ssbruno}
941303816Ssbruno
942303816Ssbrunostatic void
943303816Ssbrunoixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
944303816Ssbruno    uint16_t msg_size)
945303816Ssbruno{
946303816Ssbruno	struct i40e_virtchnl_queue_select *select;
947303816Ssbruno	int error = 0;
948303816Ssbruno
949303816Ssbruno	if (msg_size != sizeof(*select)) {
950303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
951303816Ssbruno		    I40E_ERR_PARAM);
952303816Ssbruno		return;
953303816Ssbruno	}
954303816Ssbruno
955303816Ssbruno	select = msg;
956303816Ssbruno	if (select->vsi_id != vf->vsi.vsi_num ||
957303816Ssbruno	    select->rx_queues == 0 || select->tx_queues == 0) {
958303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
959303816Ssbruno		    I40E_ERR_PARAM);
960303816Ssbruno		return;
961303816Ssbruno	}
962303816Ssbruno
963303816Ssbruno	/* Enable TX rings selected by the VF */
964303816Ssbruno	for (int i = 0; i < 32; i++) {
965303816Ssbruno		if ((1 << i) & select->tx_queues) {
966303816Ssbruno			/* Warn if queue is out of VF allocation range */
967303816Ssbruno			if (i >= vf->vsi.num_queues) {
968303816Ssbruno				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
969303816Ssbruno				    vf->vf_num, i);
970303816Ssbruno				break;
971303816Ssbruno			}
972303816Ssbruno			/* Skip this queue if it hasn't been configured */
973303816Ssbruno			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
974303816Ssbruno				continue;
975303816Ssbruno			/* Warn if this queue is already marked as enabled */
976303816Ssbruno			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
977303816Ssbruno				device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n",
978303816Ssbruno				    vf->vf_num, i);
979303816Ssbruno
980303816Ssbruno			error = ixl_enable_tx_ring(pf, &vf->qtag, i);
981303816Ssbruno			if (error)
982303816Ssbruno				break;
983303816Ssbruno			else
984303816Ssbruno				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
985303816Ssbruno		}
986303816Ssbruno	}
987303816Ssbruno
988303816Ssbruno	/* Enable RX rings selected by the VF */
989303816Ssbruno	for (int i = 0; i < 32; i++) {
990303816Ssbruno		if ((1 << i) & select->rx_queues) {
991303816Ssbruno			/* Warn if queue is out of VF allocation range */
992303816Ssbruno			if (i >= vf->vsi.num_queues) {
993303816Ssbruno				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
994303816Ssbruno				    vf->vf_num, i);
995303816Ssbruno				break;
996303816Ssbruno			}
997303816Ssbruno			/* Skip this queue if it hasn't been configured */
998303816Ssbruno			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
999303816Ssbruno				continue;
1000303816Ssbruno			/* Warn if this queue is already marked as enabled */
1001303816Ssbruno			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
1002303816Ssbruno				device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n",
1003303816Ssbruno				    vf->vf_num, i);
1004303816Ssbruno			error = ixl_enable_rx_ring(pf, &vf->qtag, i);
1005303816Ssbruno			if (error)
1006303816Ssbruno				break;
1007303816Ssbruno			else
1008303816Ssbruno				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
1009303816Ssbruno		}
1010303816Ssbruno	}
1011303816Ssbruno
1012303816Ssbruno	if (error) {
1013303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
1014303816Ssbruno		    I40E_ERR_TIMEOUT);
1015303816Ssbruno		return;
1016303816Ssbruno	}
1017303816Ssbruno
1018303816Ssbruno	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
1019303816Ssbruno}
1020303816Ssbruno
1021303816Ssbrunostatic void
1022303816Ssbrunoixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1023303816Ssbruno    void *msg, uint16_t msg_size)
1024303816Ssbruno{
1025303816Ssbruno	struct i40e_virtchnl_queue_select *select;
1026303816Ssbruno	int error = 0;
1027303816Ssbruno
1028303816Ssbruno	if (msg_size != sizeof(*select)) {
1029303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1030303816Ssbruno		    I40E_ERR_PARAM);
1031303816Ssbruno		return;
1032303816Ssbruno	}
1033303816Ssbruno
1034303816Ssbruno	select = msg;
1035303816Ssbruno	if (select->vsi_id != vf->vsi.vsi_num ||
1036303816Ssbruno	    select->rx_queues == 0 || select->tx_queues == 0) {
1037303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1038303816Ssbruno		    I40E_ERR_PARAM);
1039303816Ssbruno		return;
1040303816Ssbruno	}
1041303816Ssbruno
1042303816Ssbruno	/* Disable TX rings selected by the VF */
1043303816Ssbruno	for (int i = 0; i < 32; i++) {
1044303816Ssbruno		if ((1 << i) & select->tx_queues) {
1045303816Ssbruno			/* Warn if queue is out of VF allocation range */
1046303816Ssbruno			if (i >= vf->vsi.num_queues) {
1047303816Ssbruno				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
1048303816Ssbruno				    vf->vf_num, i);
1049303816Ssbruno				break;
1050303816Ssbruno			}
1051303816Ssbruno			/* Skip this queue if it hasn't been configured */
1052303816Ssbruno			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
1053303816Ssbruno				continue;
1054303816Ssbruno			/* Warn if this queue is already marked as disabled */
1055303816Ssbruno			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
1056303816Ssbruno				device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n",
1057303816Ssbruno				    vf->vf_num, i);
1058303816Ssbruno				continue;
1059303816Ssbruno			}
1060303816Ssbruno			error = ixl_disable_tx_ring(pf, &vf->qtag, i);
1061303816Ssbruno			if (error)
1062303816Ssbruno				break;
1063303816Ssbruno			else
1064303816Ssbruno				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
1065303816Ssbruno		}
1066303816Ssbruno	}
1067303816Ssbruno
1068303816Ssbruno	/* Enable RX rings selected by the VF */
1069303816Ssbruno	for (int i = 0; i < 32; i++) {
1070303816Ssbruno		if ((1 << i) & select->rx_queues) {
1071303816Ssbruno			/* Warn if queue is out of VF allocation range */
1072303816Ssbruno			if (i >= vf->vsi.num_queues) {
1073303816Ssbruno				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
1074303816Ssbruno				    vf->vf_num, i);
1075303816Ssbruno				break;
1076303816Ssbruno			}
1077303816Ssbruno			/* Skip this queue if it hasn't been configured */
1078303816Ssbruno			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
1079303816Ssbruno				continue;
1080303816Ssbruno			/* Warn if this queue is already marked as disabled */
1081303816Ssbruno			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
1082303816Ssbruno				device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n",
1083303816Ssbruno				    vf->vf_num, i);
1084303816Ssbruno				continue;
1085303816Ssbruno			}
1086303816Ssbruno			error = ixl_disable_rx_ring(pf, &vf->qtag, i);
1087303816Ssbruno			if (error)
1088303816Ssbruno				break;
1089303816Ssbruno			else
1090303816Ssbruno				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
1091303816Ssbruno		}
1092303816Ssbruno	}
1093303816Ssbruno
1094303816Ssbruno	if (error) {
1095303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1096303816Ssbruno		    I40E_ERR_TIMEOUT);
1097303816Ssbruno		return;
1098303816Ssbruno	}
1099303816Ssbruno
1100303816Ssbruno	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
1101303816Ssbruno}
1102303816Ssbruno
1103303816Ssbrunostatic bool
1104303816Ssbrunoixl_zero_mac(const uint8_t *addr)
1105303816Ssbruno{
1106303816Ssbruno	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
1107303816Ssbruno
1108303816Ssbruno	return (cmp_etheraddr(addr, zero));
1109303816Ssbruno}
1110303816Ssbruno
1111303816Ssbrunostatic bool
1112303816Ssbrunoixl_bcast_mac(const uint8_t *addr)
1113303816Ssbruno{
1114303816Ssbruno
1115303816Ssbruno	return (cmp_etheraddr(addr, ixl_bcast_addr));
1116303816Ssbruno}
1117303816Ssbruno
1118303816Ssbrunostatic int
1119303816Ssbrunoixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
1120303816Ssbruno{
1121303816Ssbruno
1122303816Ssbruno	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
1123303816Ssbruno		return (EINVAL);
1124303816Ssbruno
1125303816Ssbruno	/*
1126303816Ssbruno	 * If the VF is not allowed to change its MAC address, don't let it
1127303816Ssbruno	 * set a MAC filter for an address that is not a multicast address and
1128303816Ssbruno	 * is not its assigned MAC.
1129303816Ssbruno	 */
1130303816Ssbruno	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
1131303816Ssbruno	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
1132303816Ssbruno		return (EPERM);
1133303816Ssbruno
1134303816Ssbruno	return (0);
1135303816Ssbruno}
1136303816Ssbruno
1137303816Ssbrunostatic void
1138303816Ssbrunoixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1139303816Ssbruno    uint16_t msg_size)
1140303816Ssbruno{
1141303816Ssbruno	struct i40e_virtchnl_ether_addr_list *addr_list;
1142303816Ssbruno	struct i40e_virtchnl_ether_addr *addr;
1143303816Ssbruno	struct ixl_vsi *vsi;
1144303816Ssbruno	int i;
1145303816Ssbruno	size_t expected_size;
1146303816Ssbruno
1147303816Ssbruno	vsi = &vf->vsi;
1148303816Ssbruno
1149303816Ssbruno	if (msg_size < sizeof(*addr_list)) {
1150303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1151303816Ssbruno		    I40E_ERR_PARAM);
1152303816Ssbruno		return;
1153303816Ssbruno	}
1154303816Ssbruno
1155303816Ssbruno	addr_list = msg;
1156303816Ssbruno	expected_size = sizeof(*addr_list) +
1157303816Ssbruno	    addr_list->num_elements * sizeof(*addr);
1158303816Ssbruno
1159303816Ssbruno	if (addr_list->num_elements == 0 ||
1160303816Ssbruno	    addr_list->vsi_id != vsi->vsi_num ||
1161303816Ssbruno	    msg_size != expected_size) {
1162303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1163303816Ssbruno		    I40E_ERR_PARAM);
1164303816Ssbruno		return;
1165303816Ssbruno	}
1166303816Ssbruno
1167303816Ssbruno	for (i = 0; i < addr_list->num_elements; i++) {
1168303816Ssbruno		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
1169303816Ssbruno			i40e_send_vf_nack(pf, vf,
1170303816Ssbruno			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
1171303816Ssbruno			return;
1172303816Ssbruno		}
1173303816Ssbruno	}
1174303816Ssbruno
1175303816Ssbruno	for (i = 0; i < addr_list->num_elements; i++) {
1176303816Ssbruno		addr = &addr_list->list[i];
1177303816Ssbruno		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
1178303816Ssbruno	}
1179303816Ssbruno
1180303816Ssbruno	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
1181303816Ssbruno}
1182303816Ssbruno
1183303816Ssbrunostatic void
1184303816Ssbrunoixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1185303816Ssbruno    uint16_t msg_size)
1186303816Ssbruno{
1187303816Ssbruno	struct i40e_virtchnl_ether_addr_list *addr_list;
1188303816Ssbruno	struct i40e_virtchnl_ether_addr *addr;
1189303816Ssbruno	size_t expected_size;
1190303816Ssbruno	int i;
1191303816Ssbruno
1192303816Ssbruno	if (msg_size < sizeof(*addr_list)) {
1193303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1194303816Ssbruno		    I40E_ERR_PARAM);
1195303816Ssbruno		return;
1196303816Ssbruno	}
1197303816Ssbruno
1198303816Ssbruno	addr_list = msg;
1199303816Ssbruno	expected_size = sizeof(*addr_list) +
1200303816Ssbruno	    addr_list->num_elements * sizeof(*addr);
1201303816Ssbruno
1202303816Ssbruno	if (addr_list->num_elements == 0 ||
1203303816Ssbruno	    addr_list->vsi_id != vf->vsi.vsi_num ||
1204303816Ssbruno	    msg_size != expected_size) {
1205303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1206303816Ssbruno		    I40E_ERR_PARAM);
1207303816Ssbruno		return;
1208303816Ssbruno	}
1209303816Ssbruno
1210303816Ssbruno	for (i = 0; i < addr_list->num_elements; i++) {
1211303816Ssbruno		addr = &addr_list->list[i];
1212303816Ssbruno		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
1213303816Ssbruno			i40e_send_vf_nack(pf, vf,
1214303816Ssbruno			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
1215303816Ssbruno			return;
1216303816Ssbruno		}
1217303816Ssbruno	}
1218303816Ssbruno
1219303816Ssbruno	for (i = 0; i < addr_list->num_elements; i++) {
1220303816Ssbruno		addr = &addr_list->list[i];
1221303816Ssbruno		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
1222303816Ssbruno	}
1223303816Ssbruno
1224303816Ssbruno	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
1225303816Ssbruno}
1226303816Ssbruno
1227303816Ssbrunostatic enum i40e_status_code
1228303816Ssbrunoixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
1229303816Ssbruno{
1230303816Ssbruno	struct i40e_vsi_context vsi_ctx;
1231303816Ssbruno
1232303816Ssbruno	vsi_ctx.seid = vf->vsi.seid;
1233303816Ssbruno
1234303816Ssbruno	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
1235303816Ssbruno	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
1236303816Ssbruno	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1237303816Ssbruno	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1238303816Ssbruno	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
1239303816Ssbruno}
1240303816Ssbruno
1241303816Ssbrunostatic void
1242303816Ssbrunoixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1243303816Ssbruno    uint16_t msg_size)
1244303816Ssbruno{
1245303816Ssbruno	struct i40e_virtchnl_vlan_filter_list *filter_list;
1246303816Ssbruno	enum i40e_status_code code;
1247303816Ssbruno	size_t expected_size;
1248303816Ssbruno	int i;
1249303816Ssbruno
1250303816Ssbruno	if (msg_size < sizeof(*filter_list)) {
1251303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1252303816Ssbruno		    I40E_ERR_PARAM);
1253303816Ssbruno		return;
1254303816Ssbruno	}
1255303816Ssbruno
1256303816Ssbruno	filter_list = msg;
1257303816Ssbruno	expected_size = sizeof(*filter_list) +
1258303816Ssbruno	    filter_list->num_elements * sizeof(uint16_t);
1259303816Ssbruno	if (filter_list->num_elements == 0 ||
1260303816Ssbruno	    filter_list->vsi_id != vf->vsi.vsi_num ||
1261303816Ssbruno	    msg_size != expected_size) {
1262303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1263303816Ssbruno		    I40E_ERR_PARAM);
1264303816Ssbruno		return;
1265303816Ssbruno	}
1266303816Ssbruno
1267303816Ssbruno	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1268303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1269303816Ssbruno		    I40E_ERR_PARAM);
1270303816Ssbruno		return;
1271303816Ssbruno	}
1272303816Ssbruno
1273303816Ssbruno	for (i = 0; i < filter_list->num_elements; i++) {
1274303816Ssbruno		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1275303816Ssbruno			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1276303816Ssbruno			    I40E_ERR_PARAM);
1277303816Ssbruno			return;
1278303816Ssbruno		}
1279303816Ssbruno	}
1280303816Ssbruno
1281303816Ssbruno	code = ixl_vf_enable_vlan_strip(pf, vf);
1282303816Ssbruno	if (code != I40E_SUCCESS) {
1283303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1284303816Ssbruno		    I40E_ERR_PARAM);
1285303816Ssbruno	}
1286303816Ssbruno
1287303816Ssbruno	for (i = 0; i < filter_list->num_elements; i++)
1288303816Ssbruno		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1289303816Ssbruno
1290303816Ssbruno	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
1291303816Ssbruno}
1292303816Ssbruno
1293303816Ssbrunostatic void
1294303816Ssbrunoixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1295303816Ssbruno    uint16_t msg_size)
1296303816Ssbruno{
1297303816Ssbruno	struct i40e_virtchnl_vlan_filter_list *filter_list;
1298303816Ssbruno	int i;
1299303816Ssbruno	size_t expected_size;
1300303816Ssbruno
1301303816Ssbruno	if (msg_size < sizeof(*filter_list)) {
1302303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1303303816Ssbruno		    I40E_ERR_PARAM);
1304303816Ssbruno		return;
1305303816Ssbruno	}
1306303816Ssbruno
1307303816Ssbruno	filter_list = msg;
1308303816Ssbruno	expected_size = sizeof(*filter_list) +
1309303816Ssbruno	    filter_list->num_elements * sizeof(uint16_t);
1310303816Ssbruno	if (filter_list->num_elements == 0 ||
1311303816Ssbruno	    filter_list->vsi_id != vf->vsi.vsi_num ||
1312303816Ssbruno	    msg_size != expected_size) {
1313303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1314303816Ssbruno		    I40E_ERR_PARAM);
1315303816Ssbruno		return;
1316303816Ssbruno	}
1317303816Ssbruno
1318303816Ssbruno	for (i = 0; i < filter_list->num_elements; i++) {
1319303816Ssbruno		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1320303816Ssbruno			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1321303816Ssbruno			    I40E_ERR_PARAM);
1322303816Ssbruno			return;
1323303816Ssbruno		}
1324303816Ssbruno	}
1325303816Ssbruno
1326303816Ssbruno	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1327303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1328303816Ssbruno		    I40E_ERR_PARAM);
1329303816Ssbruno		return;
1330303816Ssbruno	}
1331303816Ssbruno
1332303816Ssbruno	for (i = 0; i < filter_list->num_elements; i++)
1333303816Ssbruno		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1334303816Ssbruno
1335303816Ssbruno	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
1336303816Ssbruno}
1337303816Ssbruno
1338303816Ssbrunostatic void
1339303816Ssbrunoixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1340303816Ssbruno    void *msg, uint16_t msg_size)
1341303816Ssbruno{
1342303816Ssbruno	struct i40e_virtchnl_promisc_info *info;
1343303816Ssbruno	enum i40e_status_code code;
1344303816Ssbruno
1345303816Ssbruno	if (msg_size != sizeof(*info)) {
1346303816Ssbruno		i40e_send_vf_nack(pf, vf,
1347303816Ssbruno		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1348303816Ssbruno		return;
1349303816Ssbruno	}
1350303816Ssbruno
1351303816Ssbruno	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
1352303816Ssbruno		i40e_send_vf_nack(pf, vf,
1353303816Ssbruno		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1354303816Ssbruno		return;
1355303816Ssbruno	}
1356303816Ssbruno
1357303816Ssbruno	info = msg;
1358303816Ssbruno	if (info->vsi_id != vf->vsi.vsi_num) {
1359303816Ssbruno		i40e_send_vf_nack(pf, vf,
1360303816Ssbruno		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1361303816Ssbruno		return;
1362303816Ssbruno	}
1363303816Ssbruno
1364303816Ssbruno	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
1365303816Ssbruno	    info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
1366303816Ssbruno	if (code != I40E_SUCCESS) {
1367303816Ssbruno		i40e_send_vf_nack(pf, vf,
1368303816Ssbruno		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
1369303816Ssbruno		return;
1370303816Ssbruno	}
1371303816Ssbruno
1372303816Ssbruno	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
1373303816Ssbruno	    info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
1374303816Ssbruno	if (code != I40E_SUCCESS) {
1375303816Ssbruno		i40e_send_vf_nack(pf, vf,
1376303816Ssbruno		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
1377303816Ssbruno		return;
1378303816Ssbruno	}
1379303816Ssbruno
1380303816Ssbruno	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
1381303816Ssbruno}
1382303816Ssbruno
1383303816Ssbrunostatic void
1384303816Ssbrunoixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1385303816Ssbruno    uint16_t msg_size)
1386303816Ssbruno{
1387303816Ssbruno	struct i40e_virtchnl_queue_select *queue;
1388303816Ssbruno
1389303816Ssbruno	if (msg_size != sizeof(*queue)) {
1390303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
1391303816Ssbruno		    I40E_ERR_PARAM);
1392303816Ssbruno		return;
1393303816Ssbruno	}
1394303816Ssbruno
1395303816Ssbruno	queue = msg;
1396303816Ssbruno	if (queue->vsi_id != vf->vsi.vsi_num) {
1397303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
1398303816Ssbruno		    I40E_ERR_PARAM);
1399303816Ssbruno		return;
1400303816Ssbruno	}
1401303816Ssbruno
1402303816Ssbruno	ixl_update_eth_stats(&vf->vsi);
1403303816Ssbruno
1404303816Ssbruno	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
1405303816Ssbruno	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
1406303816Ssbruno}
1407303816Ssbruno
1408303816Ssbrunostatic void
1409303816Ssbrunoixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1410303816Ssbruno    uint16_t msg_size)
1411303816Ssbruno{
1412303816Ssbruno	struct i40e_hw *hw;
1413303816Ssbruno	struct i40e_virtchnl_rss_key *key;
1414303816Ssbruno	struct i40e_aqc_get_set_rss_key_data key_data;
1415303816Ssbruno	enum i40e_status_code status;
1416303816Ssbruno
1417303816Ssbruno	hw = &pf->hw;
1418303816Ssbruno
1419303816Ssbruno	if (msg_size < sizeof(*key)) {
1420303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1421303816Ssbruno		    I40E_ERR_PARAM);
1422303816Ssbruno		return;
1423303816Ssbruno	}
1424303816Ssbruno
1425303816Ssbruno	key = msg;
1426303816Ssbruno
1427303816Ssbruno	if (key->key_len > 52) {
1428303816Ssbruno		device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
1429303816Ssbruno		    vf->vf_num, key->key_len, 52);
1430303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1431303816Ssbruno		    I40E_ERR_PARAM);
1432303816Ssbruno		return;
1433303816Ssbruno	}
1434303816Ssbruno
1435303816Ssbruno	if (key->vsi_id != vf->vsi.vsi_num) {
1436303816Ssbruno		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1437303816Ssbruno		    vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
1438303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1439303816Ssbruno		    I40E_ERR_PARAM);
1440303816Ssbruno		return;
1441303816Ssbruno	}
1442303816Ssbruno
1443303816Ssbruno	/* Fill out hash using MAC-dependent method */
1444303816Ssbruno	if (hw->mac.type == I40E_MAC_X722) {
1445303816Ssbruno		bzero(&key_data, sizeof(key_data));
1446303816Ssbruno		if (key->key_len <= 40)
1447303816Ssbruno			bcopy(key->key, key_data.standard_rss_key, key->key_len);
1448303816Ssbruno		else {
1449303816Ssbruno			bcopy(key->key, key_data.standard_rss_key, 40);
1450303816Ssbruno			bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
1451303816Ssbruno		}
1452303816Ssbruno		status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
1453303816Ssbruno		if (status) {
1454303816Ssbruno			device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
1455303816Ssbruno			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1456303816Ssbruno			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1457303816Ssbruno			    I40E_ERR_ADMIN_QUEUE_ERROR);
1458303816Ssbruno			return;
1459303816Ssbruno		}
1460303816Ssbruno	} else {
1461303816Ssbruno		for (int i = 0; i < (key->key_len / 4); i++)
1462303816Ssbruno			i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
1463303816Ssbruno	}
1464303816Ssbruno
1465303816Ssbruno	DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
1466303816Ssbruno	    vf->vf_num, key->key[0]);
1467303816Ssbruno
1468303816Ssbruno	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY);
1469303816Ssbruno}
1470303816Ssbruno
1471303816Ssbrunostatic void
1472303816Ssbrunoixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1473303816Ssbruno    uint16_t msg_size)
1474303816Ssbruno{
1475303816Ssbruno	struct i40e_hw *hw;
1476303816Ssbruno	struct i40e_virtchnl_rss_lut *lut;
1477303816Ssbruno	enum i40e_status_code status;
1478303816Ssbruno
1479303816Ssbruno	hw = &pf->hw;
1480303816Ssbruno
1481303816Ssbruno	if (msg_size < sizeof(*lut)) {
1482303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1483303816Ssbruno		    I40E_ERR_PARAM);
1484303816Ssbruno		return;
1485303816Ssbruno	}
1486303816Ssbruno
1487303816Ssbruno	lut = msg;
1488303816Ssbruno
1489303816Ssbruno	if (lut->lut_entries > 64) {
1490303816Ssbruno		device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
1491303816Ssbruno		    vf->vf_num, lut->lut_entries, 64);
1492303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1493303816Ssbruno		    I40E_ERR_PARAM);
1494303816Ssbruno		return;
1495303816Ssbruno	}
1496303816Ssbruno
1497303816Ssbruno	if (lut->vsi_id != vf->vsi.vsi_num) {
1498303816Ssbruno		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1499303816Ssbruno		    vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
1500303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1501303816Ssbruno		    I40E_ERR_PARAM);
1502303816Ssbruno		return;
1503303816Ssbruno	}
1504303816Ssbruno
1505303816Ssbruno	/* Fill out LUT using MAC-dependent method */
1506303816Ssbruno	if (hw->mac.type == I40E_MAC_X722) {
1507303816Ssbruno		status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
1508303816Ssbruno		if (status) {
1509303816Ssbruno			device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1510303816Ssbruno			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1511303816Ssbruno			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1512303816Ssbruno			    I40E_ERR_ADMIN_QUEUE_ERROR);
1513303816Ssbruno			return;
1514303816Ssbruno		}
1515303816Ssbruno	} else {
1516303816Ssbruno		for (int i = 0; i < (lut->lut_entries / 4); i++)
1517303816Ssbruno			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
1518303816Ssbruno	}
1519303816Ssbruno
1520303816Ssbruno	DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
1521303816Ssbruno	    vf->vf_num, lut->lut[0], lut->lut_entries);
1522303816Ssbruno
1523303816Ssbruno	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT);
1524303816Ssbruno}
1525303816Ssbruno
1526303816Ssbrunostatic void
1527303816Ssbrunoixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1528303816Ssbruno    uint16_t msg_size)
1529303816Ssbruno{
1530303816Ssbruno	struct i40e_hw *hw;
1531303816Ssbruno	struct i40e_virtchnl_rss_hena *hena;
1532303816Ssbruno
1533303816Ssbruno	hw = &pf->hw;
1534303816Ssbruno
1535303816Ssbruno	if (msg_size < sizeof(*hena)) {
1536303816Ssbruno		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
1537303816Ssbruno		    I40E_ERR_PARAM);
1538303816Ssbruno		return;
1539303816Ssbruno	}
1540303816Ssbruno
1541303816Ssbruno	hena = msg;
1542303816Ssbruno
1543303816Ssbruno	/* Set HENA */
1544303816Ssbruno	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
1545303816Ssbruno	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
1546303816Ssbruno
1547303816Ssbruno	DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
1548303816Ssbruno	    vf->vf_num, hena->hena);
1549303816Ssbruno
1550303816Ssbruno	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA);
1551303816Ssbruno}
1552303816Ssbruno
/*
 * Dispatch one virtchnl mailbox message from a VF to its handler.
 *
 * 'event' is an admin-queue receive event: the descriptor's retval field
 * carries the absolute (global) VF id and cookie_high carries the virtchnl
 * opcode; the message payload is in event->msg_buf.  Each handler is
 * responsible for validating the payload and ACKing/NACKing the VF.
 */
void
ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
{
	struct ixl_vf *vf;
	void *msg;
	uint16_t vf_num, msg_size;
	uint32_t opcode;

	/* Convert the absolute VF id into this PF's zero-based VF index. */
	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
	opcode = le32toh(event->desc.cookie_high);

	if (vf_num >= pf->num_vfs) {
		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
		return;
	}

	vf = &pf->vfs[vf_num];
	msg = event->msg_buf;
	msg_size = event->msg_len;

	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
	    "Got msg %s(%d) from%sVF-%d of size %d\n",
	    ixl_vc_opcode_str(opcode), opcode,
	    (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
	    vf_num, msg_size);

	/* This must be a stray msg from a previously destroyed VF. */
	if (!(vf->vf_flags & VF_FLAG_ENABLED))
		return;

	/*
	 * Each handler validates msg/msg_size itself and replies to the VF;
	 * anything unrecognized is NACKed as not implemented.
	 */
	switch (opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ixl_vf_version_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		ixl_vf_reset_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
		ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
		ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_SET_RSS_HENA:
		ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
		break;

	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
	default:
		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
		break;
	}
}
1641303816Ssbruno
1642303816Ssbruno/* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
1643303816Ssbrunovoid
1644303816Ssbrunoixl_handle_vflr(void *arg, int pending)
1645303816Ssbruno{
1646303816Ssbruno	struct ixl_pf *pf;
1647303816Ssbruno	struct ixl_vf *vf;
1648303816Ssbruno	struct i40e_hw *hw;
1649303816Ssbruno	uint16_t global_vf_num;
1650303816Ssbruno	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
1651303816Ssbruno	int i;
1652303816Ssbruno
1653303816Ssbruno	pf = arg;
1654303816Ssbruno	hw = &pf->hw;
1655303816Ssbruno
1656303816Ssbruno	IXL_PF_LOCK(pf);
1657303816Ssbruno	for (i = 0; i < pf->num_vfs; i++) {
1658303816Ssbruno		global_vf_num = hw->func_caps.vf_base_id + i;
1659303816Ssbruno
1660303816Ssbruno		vf = &pf->vfs[i];
1661303816Ssbruno		if (!(vf->vf_flags & VF_FLAG_ENABLED))
1662303816Ssbruno			continue;
1663303816Ssbruno
1664303816Ssbruno		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
1665303816Ssbruno		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
1666303816Ssbruno		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
1667303816Ssbruno		if (vflrstat & vflrstat_mask) {
1668303816Ssbruno			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
1669303816Ssbruno			    vflrstat_mask);
1670303816Ssbruno
1671303816Ssbruno			ixl_reinit_vf(pf, vf);
1672303816Ssbruno		}
1673303816Ssbruno	}
1674303816Ssbruno
1675303816Ssbruno	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
1676303816Ssbruno	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
1677303816Ssbruno	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
1678303816Ssbruno	ixl_flush(hw);
1679303816Ssbruno
1680303816Ssbruno	IXL_PF_UNLOCK(pf);
1681303816Ssbruno}
1682303816Ssbruno
1683303816Ssbrunostatic int
1684303816Ssbrunoixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
1685303816Ssbruno{
1686303816Ssbruno
1687303816Ssbruno	switch (err) {
1688303816Ssbruno	case I40E_AQ_RC_EPERM:
1689303816Ssbruno		return (EPERM);
1690303816Ssbruno	case I40E_AQ_RC_ENOENT:
1691303816Ssbruno		return (ENOENT);
1692303816Ssbruno	case I40E_AQ_RC_ESRCH:
1693303816Ssbruno		return (ESRCH);
1694303816Ssbruno	case I40E_AQ_RC_EINTR:
1695303816Ssbruno		return (EINTR);
1696303816Ssbruno	case I40E_AQ_RC_EIO:
1697303816Ssbruno		return (EIO);
1698303816Ssbruno	case I40E_AQ_RC_ENXIO:
1699303816Ssbruno		return (ENXIO);
1700303816Ssbruno	case I40E_AQ_RC_E2BIG:
1701303816Ssbruno		return (E2BIG);
1702303816Ssbruno	case I40E_AQ_RC_EAGAIN:
1703303816Ssbruno		return (EAGAIN);
1704303816Ssbruno	case I40E_AQ_RC_ENOMEM:
1705303816Ssbruno		return (ENOMEM);
1706303816Ssbruno	case I40E_AQ_RC_EACCES:
1707303816Ssbruno		return (EACCES);
1708303816Ssbruno	case I40E_AQ_RC_EFAULT:
1709303816Ssbruno		return (EFAULT);
1710303816Ssbruno	case I40E_AQ_RC_EBUSY:
1711303816Ssbruno		return (EBUSY);
1712303816Ssbruno	case I40E_AQ_RC_EEXIST:
1713303816Ssbruno		return (EEXIST);
1714303816Ssbruno	case I40E_AQ_RC_EINVAL:
1715303816Ssbruno		return (EINVAL);
1716303816Ssbruno	case I40E_AQ_RC_ENOTTY:
1717303816Ssbruno		return (ENOTTY);
1718303816Ssbruno	case I40E_AQ_RC_ENOSPC:
1719303816Ssbruno		return (ENOSPC);
1720303816Ssbruno	case I40E_AQ_RC_ENOSYS:
1721303816Ssbruno		return (ENOSYS);
1722303816Ssbruno	case I40E_AQ_RC_ERANGE:
1723303816Ssbruno		return (ERANGE);
1724303816Ssbruno	case I40E_AQ_RC_EFLUSHED:
1725303816Ssbruno		return (EINVAL);	/* No exact equivalent in errno.h */
1726303816Ssbruno	case I40E_AQ_RC_BAD_ADDR:
1727303816Ssbruno		return (EFAULT);
1728303816Ssbruno	case I40E_AQ_RC_EMODE:
1729303816Ssbruno		return (EPERM);
1730303816Ssbruno	case I40E_AQ_RC_EFBIG:
1731303816Ssbruno		return (EFBIG);
1732303816Ssbruno	default:
1733303816Ssbruno		return (EINVAL);
1734303816Ssbruno	}
1735303816Ssbruno}
1736303816Ssbruno
1737303816Ssbrunoint
1738303816Ssbrunoixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
1739303816Ssbruno{
1740303816Ssbruno	struct ixl_pf *pf;
1741303816Ssbruno	struct i40e_hw *hw;
1742303816Ssbruno	struct ixl_vsi *pf_vsi;
1743303816Ssbruno	enum i40e_status_code ret;
1744303816Ssbruno	int i, error;
1745303816Ssbruno
1746303816Ssbruno	pf = device_get_softc(dev);
1747303816Ssbruno	hw = &pf->hw;
1748303816Ssbruno	pf_vsi = &pf->vsi;
1749303816Ssbruno
1750303816Ssbruno	IXL_PF_LOCK(pf);
1751303816Ssbruno	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
1752303816Ssbruno	    M_ZERO);
1753303816Ssbruno
1754303816Ssbruno	if (pf->vfs == NULL) {
1755303816Ssbruno		error = ENOMEM;
1756303816Ssbruno		goto fail;
1757303816Ssbruno	}
1758303816Ssbruno
1759303816Ssbruno	for (i = 0; i < num_vfs; i++)
1760303816Ssbruno		sysctl_ctx_init(&pf->vfs[i].ctx);
1761303816Ssbruno
1762303816Ssbruno	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
1763303816Ssbruno	    1, FALSE, &pf->veb_seid, FALSE, NULL);
1764303816Ssbruno	if (ret != I40E_SUCCESS) {
1765303816Ssbruno		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
1766303816Ssbruno		device_printf(dev, "add_veb failed; code=%d error=%d", ret,
1767303816Ssbruno		    error);
1768303816Ssbruno		goto fail;
1769303816Ssbruno	}
1770303816Ssbruno
1771303816Ssbruno	ixl_enable_adminq(hw);
1772303816Ssbruno
1773303816Ssbruno	pf->num_vfs = num_vfs;
1774303816Ssbruno	IXL_PF_UNLOCK(pf);
1775303816Ssbruno	return (0);
1776303816Ssbruno
1777303816Ssbrunofail:
1778303816Ssbruno	free(pf->vfs, M_IXL);
1779303816Ssbruno	pf->vfs = NULL;
1780303816Ssbruno	IXL_PF_UNLOCK(pf);
1781303816Ssbruno	return (error);
1782303816Ssbruno}
1783303816Ssbruno
1784303816Ssbrunovoid
1785303816Ssbrunoixl_iov_uninit(device_t dev)
1786303816Ssbruno{
1787303816Ssbruno	struct ixl_pf *pf;
1788303816Ssbruno	struct i40e_hw *hw;
1789303816Ssbruno	struct ixl_vsi *vsi;
1790303816Ssbruno	struct ifnet *ifp;
1791303816Ssbruno	struct ixl_vf *vfs;
1792303816Ssbruno	int i, num_vfs;
1793303816Ssbruno
1794303816Ssbruno	pf = device_get_softc(dev);
1795303816Ssbruno	hw = &pf->hw;
1796303816Ssbruno	vsi = &pf->vsi;
1797303816Ssbruno	ifp = vsi->ifp;
1798303816Ssbruno
1799303816Ssbruno	IXL_PF_LOCK(pf);
1800303816Ssbruno	for (i = 0; i < pf->num_vfs; i++) {
1801303816Ssbruno		if (pf->vfs[i].vsi.seid != 0)
1802303816Ssbruno			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
1803303816Ssbruno		ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
1804303816Ssbruno		DDPRINTF(dev, "VF %d: %d released\n",
1805303816Ssbruno		    i, pf->vfs[i].qtag.num_allocated);
1806303816Ssbruno		DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1807303816Ssbruno	}
1808303816Ssbruno
1809303816Ssbruno	if (pf->veb_seid != 0) {
1810303816Ssbruno		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
1811303816Ssbruno		pf->veb_seid = 0;
1812303816Ssbruno	}
1813303816Ssbruno
1814303816Ssbruno	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
1815303816Ssbruno		ixl_disable_intr(vsi);
1816303816Ssbruno		ixl_flush(hw);
1817303816Ssbruno	}
1818303816Ssbruno
1819303816Ssbruno	vfs = pf->vfs;
1820303816Ssbruno	num_vfs = pf->num_vfs;
1821303816Ssbruno
1822303816Ssbruno	pf->vfs = NULL;
1823303816Ssbruno	pf->num_vfs = 0;
1824303816Ssbruno	IXL_PF_UNLOCK(pf);
1825303816Ssbruno
1826303816Ssbruno	/* Do this after the unlock as sysctl_ctx_free might sleep. */
1827303816Ssbruno	for (i = 0; i < num_vfs; i++)
1828303816Ssbruno		sysctl_ctx_free(&vfs[i].ctx);
1829303816Ssbruno	free(vfs, M_IXL);
1830303816Ssbruno}
1831303816Ssbruno
1832303816Ssbrunostatic int
1833303816Ssbrunoixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
1834303816Ssbruno{
1835303816Ssbruno	device_t dev = pf->dev;
1836303816Ssbruno	int error;
1837303816Ssbruno
1838303816Ssbruno	/* Validate, and clamp value if invalid */
1839303816Ssbruno	if (num_queues < 1 || num_queues > 16)
1840303816Ssbruno		device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
1841303816Ssbruno		    num_queues, vf->vf_num);
1842303816Ssbruno	if (num_queues < 1) {
1843303816Ssbruno		device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
1844303816Ssbruno		num_queues = 1;
1845303816Ssbruno	} else if (num_queues > 16) {
1846303816Ssbruno		device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num);
1847303816Ssbruno		num_queues = 16;
1848303816Ssbruno	}
1849303816Ssbruno	error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
1850303816Ssbruno	if (error) {
1851303816Ssbruno		device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
1852303816Ssbruno		    num_queues, vf->vf_num);
1853303816Ssbruno		return (ENOSPC);
1854303816Ssbruno	}
1855303816Ssbruno
1856303816Ssbruno	DDPRINTF(dev, "VF %d: %d allocated, %d active",
1857303816Ssbruno	    vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
1858303816Ssbruno	DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1859303816Ssbruno
1860303816Ssbruno	return (0);
1861303816Ssbruno}
1862303816Ssbruno
1863303816Ssbrunoint
1864303816Ssbrunoixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
1865303816Ssbruno{
1866303816Ssbruno	char sysctl_name[QUEUE_NAME_LEN];
1867303816Ssbruno	struct ixl_pf *pf;
1868303816Ssbruno	struct ixl_vf *vf;
1869303816Ssbruno	const void *mac;
1870303816Ssbruno	size_t size;
1871303816Ssbruno	int error;
1872303816Ssbruno	int vf_num_queues;
1873303816Ssbruno
1874303816Ssbruno	pf = device_get_softc(dev);
1875303816Ssbruno	vf = &pf->vfs[vfnum];
1876303816Ssbruno
1877303816Ssbruno	IXL_PF_LOCK(pf);
1878303816Ssbruno	vf->vf_num = vfnum;
1879303816Ssbruno
1880303816Ssbruno	vf->vsi.back = pf;
1881303816Ssbruno	vf->vf_flags = VF_FLAG_ENABLED;
1882303816Ssbruno	SLIST_INIT(&vf->vsi.ftl);
1883303816Ssbruno
1884303816Ssbruno	/* Reserve queue allocation from PF */
1885303816Ssbruno	vf_num_queues = nvlist_get_number(params, "num-queues");
1886303816Ssbruno	error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
1887303816Ssbruno	if (error != 0)
1888303816Ssbruno		goto out;
1889303816Ssbruno
1890303816Ssbruno	error = ixl_vf_setup_vsi(pf, vf);
1891303816Ssbruno	if (error != 0)
1892303816Ssbruno		goto out;
1893303816Ssbruno
1894303816Ssbruno	if (nvlist_exists_binary(params, "mac-addr")) {
1895303816Ssbruno		mac = nvlist_get_binary(params, "mac-addr", &size);
1896303816Ssbruno		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
1897303816Ssbruno
1898303816Ssbruno		if (nvlist_get_bool(params, "allow-set-mac"))
1899303816Ssbruno			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1900303816Ssbruno	} else
1901303816Ssbruno		/*
1902303816Ssbruno		 * If the administrator has not specified a MAC address then
1903303816Ssbruno		 * we must allow the VF to choose one.
1904303816Ssbruno		 */
1905303816Ssbruno		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1906303816Ssbruno
1907303816Ssbruno	if (nvlist_get_bool(params, "mac-anti-spoof"))
1908303816Ssbruno		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
1909303816Ssbruno
1910303816Ssbruno	if (nvlist_get_bool(params, "allow-promisc"))
1911303816Ssbruno		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
1912303816Ssbruno
1913303816Ssbruno	vf->vf_flags |= VF_FLAG_VLAN_CAP;
1914303816Ssbruno
1915303816Ssbruno	ixl_reset_vf(pf, vf);
1916303816Ssbrunoout:
1917303816Ssbruno	IXL_PF_UNLOCK(pf);
1918303816Ssbruno	if (error == 0) {
1919303816Ssbruno		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
1920303816Ssbruno		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
1921303816Ssbruno	}
1922303816Ssbruno
1923303816Ssbruno	return (error);
1924303816Ssbruno}
1925303816Ssbruno
1926