ixl_pf_iov.c: r303816 (head) -> r303967 (stable/11)
1/******************************************************************************
2
3 Copyright (c) 2013-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: head/sys/dev/ixl/ixl_pf_iov.c 303816 2016-08-07 18:12:36Z sbruno $*/
33/*$FreeBSD: stable/11/sys/dev/ixl/ixl_pf_iov.c 303967 2016-08-11 19:13:30Z sbruno $*/
34
35#include "ixl_pf_iov.h"
36
37/* Private functions */
38static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
39static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
40static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
41
42static bool ixl_zero_mac(const uint8_t *addr);
43static bool ixl_bcast_mac(const uint8_t *addr);
44
45static const char * ixl_vc_opcode_str(uint16_t op);
46static int ixl_vc_opcode_level(uint16_t opcode);
47
48static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
49
50static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
51static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
52static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
53static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
54static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
55static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
56static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
57static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
58static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
59static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
60static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
61static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
62static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
63static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
64static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_txq_info *info);
65static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_rxq_info *info);
66static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
67static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct i40e_virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
68 enum i40e_queue_type *last_type, uint16_t *last_queue);
69static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct i40e_virtchnl_vector_map *vector);
70static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
71static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
72static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
73static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
74static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
75static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
76static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
77static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
78static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
79static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
80static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
81
82static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
83
84void
85ixl_initialize_sriov(struct ixl_pf *pf)
86{
87 device_t dev = pf->dev;
88 struct i40e_hw *hw = &pf->hw;
89 nvlist_t *pf_schema, *vf_schema;
90 int iov_error;
91
92 /* SR-IOV is only supported when MSI-X is in use. */
93 if (pf->msix <= 1)
94 return;
95
96 pf_schema = pci_iov_schema_alloc_node();
97 vf_schema = pci_iov_schema_alloc_node();
98 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
99 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
100 IOV_SCHEMA_HASDEFAULT, TRUE);
101 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
102 IOV_SCHEMA_HASDEFAULT, FALSE);
103 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
104 IOV_SCHEMA_HASDEFAULT, FALSE);
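	/*
	 * The default num-queues is derived from the VF MSI-X vector count;
	 * one vector per VF is set aside for the non-queue (admin/other)
	 * interrupt cause, so only num_msix_vectors_vf - 1 vectors remain
	 * to service queue pairs.
	 */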
105 pci_iov_schema_add_uint16(vf_schema, "num-queues",
106 IOV_SCHEMA_HASDEFAULT,
107 max(1, hw->func_caps.num_msix_vectors_vf - 1) % IXLV_MAX_QUEUES);
108
109 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
110 if (iov_error != 0) {
111 device_printf(dev,
112 "Failed to initialize SR-IOV (error=%d)\n",
113 iov_error);
114 } else
115 device_printf(dev, "SR-IOV ready\n");
116
117 pf->vc_debug_lvl = 1;
118}
119
120/*
121 * Allocate the VSI for a VF.
122 */
123static int
124ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
125{
126 device_t dev;
127 struct i40e_hw *hw;
128 struct ixl_vsi *vsi;
129 struct i40e_vsi_context vsi_ctx;
130 int i;
131 enum i40e_status_code code;
132
133 hw = &pf->hw;
134 vsi = &pf->vsi;
135 dev = pf->dev;
136
137 vsi_ctx.pf_num = hw->pf_id;
138 vsi_ctx.uplink_seid = pf->veb_seid;
139 vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
140 vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
141 vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
142
143 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
144
145 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
146 vsi_ctx.info.switch_id = htole16(0);
147
148 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
149 vsi_ctx.info.sec_flags = 0;
150 if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
151 vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
152
153 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
154 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
155 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
156
157 vsi_ctx.info.valid_sections |=
158 htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
159 vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
160
161 /* ERJ: Only scattered allocation is supported for VFs right now */
162 for (i = 0; i < vf->qtag.num_active; i++)
163 vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
164 for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
165 vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
166
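	/* The TC queue-count field is log2-encoded; bsrl() supplies the exponent. */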
167 vsi_ctx.info.tc_mapping[0] = htole16(
168 (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
169 (bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
170
171 code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
172 if (code != I40E_SUCCESS)
173 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
174 vf->vsi.seid = vsi_ctx.seid;
175 vf->vsi.vsi_num = vsi_ctx.vsi_number;
176 // vf->vsi.first_queue = vf->qtag.qidx[0];
177 vf->vsi.num_queues = vf->qtag.num_active;
178
179 code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
180 if (code != I40E_SUCCESS)
181 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
182
183 code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
184 if (code != I40E_SUCCESS) {
185 device_printf(dev, "Failed to disable BW limit: %d\n",
186 ixl_adminq_err_to_errno(hw->aq.asq_last_status));
187 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
188 }
189
190 memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
191 return (0);
192}
193
194static int
195ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
196{
197 struct i40e_hw *hw;
198 int error;
199
200 hw = &pf->hw;
201
202 error = ixl_vf_alloc_vsi(pf, vf);
203 if (error != 0)
204 return (error);
205
206 vf->vsi.hw_filters_add = 0;
207 vf->vsi.hw_filters_del = 0;
208 ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
209 ixl_reconfigure_filters(&vf->vsi);
210
211 return (0);
212}
213
214static void
215ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
216 uint32_t val)
217{
218 uint32_t qtable;
219 int index, shift;
220
221 /*
222 * Two queues are mapped in a single register, so we have to do some
223 * gymnastics to convert the queue number into a register index and
224 * shift.
225 */
226 index = qnum / 2;
227 shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
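	/* e.g. qnum 5 maps to register index 2, the upper (QINDEX_1) half of QTABLE */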
228
229 qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
230 qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
231 qtable |= val << shift;
232 i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
233}
234
235static void
236ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
237{
238 struct i40e_hw *hw;
239 uint32_t qtable;
240 int i;
241
242 hw = &pf->hw;
243
244 /*
245 * Contiguous mappings aren't actually supported by the hardware,
246 * so we have to use non-contiguous mappings.
247 */
248 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
249 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
250
251 /* Enable LAN traffic on this VF */
252 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
253 I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
254
255 /* Program index of each VF queue into PF queue space
256 * (This is only needed if QTABLE is enabled) */
257 for (i = 0; i < vf->vsi.num_queues; i++) {
258 qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
259 I40E_VPLAN_QTABLE_QINDEX_SHIFT;
260
261 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
262 }
263 for (; i < IXL_MAX_VSI_QUEUES; i++)
264 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
265 I40E_VPLAN_QTABLE_QINDEX_MASK);
266
267 /* Map queues allocated to VF to its VSI;
268 * This mapping matches the VF-wide mapping since the VF
269 * is only given a single VSI */
270 for (i = 0; i < vf->vsi.num_queues; i++)
271 ixl_vf_map_vsi_queue(hw, vf, i,
272 ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
273
274 /* Set rest of VSI queues as unused. */
275 for (; i < IXL_MAX_VSI_QUEUES; i++)
276 ixl_vf_map_vsi_queue(hw, vf, i,
277 I40E_VSILAN_QTABLE_QINDEX_0_MASK);
278
279 ixl_flush(hw);
280}
281
282static void
283ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
284{
285 struct i40e_hw *hw;
286
287 hw = &pf->hw;
288
289 if (vsi->seid == 0)
290 return;
291
292 i40e_aq_delete_element(hw, vsi->seid, NULL);
293}
294
295static void
296ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
297{
298
299 wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
300 ixl_flush(hw);
301}
302
303static void
304ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
305{
306
307 wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
308 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
309 ixl_flush(hw);
310}
311
312static void
313ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
314{
315 struct i40e_hw *hw;
316 uint32_t vfint_reg, vpint_reg;
317 int i;
318
319 hw = &pf->hw;
320
321 ixl_vf_vsi_release(pf, &vf->vsi);
322
323 /* Index 0 has a special register. */
324 ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
325
326 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
 327		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
328 ixl_vf_disable_queue_intr(hw, vfint_reg);
329 }
330
331 /* Index 0 has a special register. */
332 ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
333
334 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
335 vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
336 ixl_vf_unregister_intr(hw, vpint_reg);
337 }
338
339 vf->vsi.num_queues = 0;
340}
341
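/*
 * Poll the VF's PCIe device status through the PF_PCI_CIAA/CIAD indirect
 * access registers until no transactions are pending, or return ETIMEDOUT
 * if they never drain.
 */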
342static int
343ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
344{
345 struct i40e_hw *hw;
346 int i;
347 uint16_t global_vf_num;
348 uint32_t ciad;
349
350 hw = &pf->hw;
351 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
352
353 wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
354 (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
355 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
356 ciad = rd32(hw, I40E_PF_PCI_CIAD);
357 if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
358 return (0);
359 DELAY(1);
360 }
361
362 return (ETIMEDOUT);
363}
364
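/*
 * Trigger a software reset of the VF through VPGEN_VFRTRIG, then let
 * ixl_reinit_vf() wait for completion and rebuild the VF's resources.
 */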
365static void
366ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
367{
368 struct i40e_hw *hw;
369 uint32_t vfrtrig;
370
371 hw = &pf->hw;
372
373 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
374 vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
375 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
376 ixl_flush(hw);
377
378 ixl_reinit_vf(pf, vf);
379}
380
381static void
382ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
383{
384 struct i40e_hw *hw;
385 uint32_t vfrstat, vfrtrig;
386 int i, error;
387
388 hw = &pf->hw;
389
390 error = ixl_flush_pcie(pf, vf);
391 if (error != 0)
392 device_printf(pf->dev,
393 "Timed out waiting for PCIe activity to stop on VF-%d\n",
394 vf->vf_num);
395
396 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
397 DELAY(10);
398
399 vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
400 if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
401 break;
402 }
403
404 if (i == IXL_VF_RESET_TIMEOUT)
405 device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
406
407 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
408
409 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
410 vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
411 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
412
413 if (vf->vsi.seid != 0)
414 ixl_disable_rings(&vf->vsi);
415
416 ixl_vf_release_resources(pf, vf);
417 ixl_vf_setup_vsi(pf, vf);
418 ixl_vf_map_queues(pf, vf);
419
420 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
421 ixl_flush(hw);
422}
423
424static const char *
425ixl_vc_opcode_str(uint16_t op)
426{
427
428 switch (op) {
429 case I40E_VIRTCHNL_OP_VERSION:
430 return ("VERSION");
431 case I40E_VIRTCHNL_OP_RESET_VF:
432 return ("RESET_VF");
433 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
434 return ("GET_VF_RESOURCES");
435 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
436 return ("CONFIG_TX_QUEUE");
437 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
438 return ("CONFIG_RX_QUEUE");
439 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
440 return ("CONFIG_VSI_QUEUES");
441 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
442 return ("CONFIG_IRQ_MAP");
443 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
444 return ("ENABLE_QUEUES");
445 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
446 return ("DISABLE_QUEUES");
447 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
448 return ("ADD_ETHER_ADDRESS");
449 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
450 return ("DEL_ETHER_ADDRESS");
451 case I40E_VIRTCHNL_OP_ADD_VLAN:
452 return ("ADD_VLAN");
453 case I40E_VIRTCHNL_OP_DEL_VLAN:
454 return ("DEL_VLAN");
455 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
456 return ("CONFIG_PROMISCUOUS_MODE");
457 case I40E_VIRTCHNL_OP_GET_STATS:
458 return ("GET_STATS");
459 case I40E_VIRTCHNL_OP_FCOE:
460 return ("FCOE");
461 case I40E_VIRTCHNL_OP_EVENT:
462 return ("EVENT");
463 case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
464 return ("CONFIG_RSS_KEY");
465 case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
466 return ("CONFIG_RSS_LUT");
467 case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
468 return ("GET_RSS_HENA_CAPS");
469 case I40E_VIRTCHNL_OP_SET_RSS_HENA:
470 return ("SET_RSS_HENA");
471 default:
472 return ("UNKNOWN");
473 }
474}
475
476static int
477ixl_vc_opcode_level(uint16_t opcode)
478{
479 switch (opcode) {
480 case I40E_VIRTCHNL_OP_GET_STATS:
481 return (10);
482 default:
483 return (5);
484 }
485}
486
487static void
488ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
489 enum i40e_status_code status, void *msg, uint16_t len)
490{
491 struct i40e_hw *hw;
492 int global_vf_id;
493
494 hw = &pf->hw;
495 global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
496
497 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
498 "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
499 ixl_vc_opcode_str(op), op, status, vf->vf_num);
500
501 i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
502}
503
504static void
505ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
506{
507
508 ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
509}
510
511static void
512ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
513 enum i40e_status_code status, const char *file, int line)
514{
515
516 I40E_VC_DEBUG(pf, 1,
517 "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
518 ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
519 status, vf->vf_num, file, line);
520 ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
521}
522
523static void
524ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
525 uint16_t msg_size)
526{
527 struct i40e_virtchnl_version_info reply;
528
529 if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
530 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
531 I40E_ERR_PARAM);
532 return;
533 }
534
535 vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;
536
537 reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
538 reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
539 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
540 sizeof(reply));
541}
542
543static void
544ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
545 uint16_t msg_size)
546{
547
548 if (msg_size != 0) {
549 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
550 I40E_ERR_PARAM);
551 return;
552 }
553
554 ixl_reset_vf(pf, vf);
555
556 /* No response to a reset message. */
557}
558
559static void
560ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
561 uint16_t msg_size)
562{
563 struct i40e_virtchnl_vf_resource reply;
564
565 if ((vf->version == 0 && msg_size != 0) ||
566 (vf->version == 1 && msg_size != 4)) {
567 device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
568 " for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
569 vf->version);
570 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
571 I40E_ERR_PARAM);
572 return;
573 }
574
575 bzero(&reply, sizeof(reply));
576
577 if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
578 reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
579 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
580 I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
581 else
582 /* Force VF RSS setup by PF in 1.1+ VFs */
583 reply.vf_offload_flags = *(u32 *)msg & (
584 I40E_VIRTCHNL_VF_OFFLOAD_L2 |
585 I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
586 I40E_VIRTCHNL_VF_OFFLOAD_VLAN);
587
588 reply.num_vsis = 1;
589 reply.num_queue_pairs = vf->vsi.num_queues;
590 reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
591 reply.rss_key_size = 52;
592 reply.rss_lut_size = 64;
593 reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
594 reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
595 reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
596 memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
597
598 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
599 I40E_SUCCESS, &reply, sizeof(reply));
600}
601
602static int
603ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
604 struct i40e_virtchnl_txq_info *info)
605{
606 struct i40e_hw *hw;
607 struct i40e_hmc_obj_txq txq;
608 uint16_t global_queue_num, global_vf_num;
609 enum i40e_status_code status;
610 uint32_t qtx_ctl;
611
612 hw = &pf->hw;
613 global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
614 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
615 bzero(&txq, sizeof(txq));
616
617 DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
618 vf->vf_num, global_queue_num, info->queue_id, global_vf_num);
619
620 status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
621 if (status != I40E_SUCCESS)
622 return (EINVAL);
623
624 txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
625
626 txq.head_wb_ena = info->headwb_enabled;
627 txq.head_wb_addr = info->dma_headwb_addr;
628 txq.qlen = info->ring_len;
629 txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
630 txq.rdylist_act = 0;
631
632 status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
633 if (status != I40E_SUCCESS)
634 return (EINVAL);
635
636 qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
637 (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
638 (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
639 wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
640 ixl_flush(hw);
641
642 ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
643
644 return (0);
645}
646
647static int
648ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
649 struct i40e_virtchnl_rxq_info *info)
650{
651 struct i40e_hw *hw;
652 struct i40e_hmc_obj_rxq rxq;
653 uint16_t global_queue_num;
654 enum i40e_status_code status;
655
656 hw = &pf->hw;
657 global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
658 bzero(&rxq, sizeof(rxq));
659
660 DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
661 vf->vf_num, global_queue_num, info->queue_id);
662
663 if (info->databuffer_size > IXL_VF_MAX_BUFFER)
664 return (EINVAL);
665
666 if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
667 info->max_pkt_size < ETHER_MIN_LEN)
668 return (EINVAL);
669
670 if (info->splithdr_enabled) {
671 if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
672 return (EINVAL);
673
674 rxq.hsplit_0 = info->rx_split_pos &
675 (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
676 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
677 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
678 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
679 rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
680
681 rxq.dtype = 2;
682 }
683
684 status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
685 if (status != I40E_SUCCESS)
686 return (EINVAL);
687
688 rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
689 rxq.qlen = info->ring_len;
690
691 rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
692
693 rxq.dsize = 1;
694 rxq.crcstrip = 1;
695 rxq.l2tsel = 1;
696
697 rxq.rxmax = info->max_pkt_size;
698 rxq.tphrdesc_ena = 1;
699 rxq.tphwdesc_ena = 1;
700 rxq.tphdata_ena = 1;
701 rxq.tphhead_ena = 1;
702 rxq.lrxqthresh = 2;
703 rxq.prefena = 1;
704
705 status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
706 if (status != I40E_SUCCESS)
707 return (EINVAL);
708
709 ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
710
711 return (0);
712}
713
714static void
715ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
716 uint16_t msg_size)
717{
718 struct i40e_virtchnl_vsi_queue_config_info *info;
719 struct i40e_virtchnl_queue_pair_info *pair;
720 uint16_t expected_msg_size;
721 int i;
722
723 if (msg_size < sizeof(*info)) {
724 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
725 I40E_ERR_PARAM);
726 return;
727 }
728
729 info = msg;
730 if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
731 device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
732 vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
733 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
734 I40E_ERR_PARAM);
735 return;
736 }
737
738 expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
739 if (msg_size != expected_msg_size) {
740 device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
741 vf->vf_num, msg_size, expected_msg_size);
742 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
743 I40E_ERR_PARAM);
744 return;
745 }
746
747 if (info->vsi_id != vf->vsi.vsi_num) {
748 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
749 vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
750 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
751 I40E_ERR_PARAM);
752 return;
753 }
754
755 for (i = 0; i < info->num_queue_pairs; i++) {
756 pair = &info->qpair[i];
757
758 if (pair->txq.vsi_id != vf->vsi.vsi_num ||
759 pair->rxq.vsi_id != vf->vsi.vsi_num ||
760 pair->txq.queue_id != pair->rxq.queue_id ||
761 pair->txq.queue_id >= vf->vsi.num_queues) {
762
763 i40e_send_vf_nack(pf, vf,
764 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
765 return;
766 }
767
768 if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
769 i40e_send_vf_nack(pf, vf,
770 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
771 return;
772 }
773
774 if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
775 i40e_send_vf_nack(pf, vf,
776 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
777 return;
778 }
779 }
780
781 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
782}
783
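/*
 * Program the interrupt cause-control register (QINT_RQCTL/QINT_TQCTL) for
 * one RX or TX queue and chain it to the previously programmed queue,
 * building the per-vector interrupt linked list one entry at a time.
 */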
784static void
785ixl_vf_set_qctl(struct ixl_pf *pf,
786 const struct i40e_virtchnl_vector_map *vector,
787 enum i40e_queue_type cur_type, uint16_t cur_queue,
788 enum i40e_queue_type *last_type, uint16_t *last_queue)
789{
790 uint32_t offset, qctl;
791 uint16_t itr_indx;
792
793 if (cur_type == I40E_QUEUE_TYPE_RX) {
794 offset = I40E_QINT_RQCTL(cur_queue);
795 itr_indx = vector->rxitr_idx;
796 } else {
797 offset = I40E_QINT_TQCTL(cur_queue);
798 itr_indx = vector->txitr_idx;
799 }
800
801 qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
802 (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
803 (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
804 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
805 (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
806
807 wr32(&pf->hw, offset, qctl);
808
809 *last_type = cur_type;
810 *last_queue = cur_queue;
811}
812
813static void
814ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
815 const struct i40e_virtchnl_vector_map *vector)
816{
817 struct i40e_hw *hw;
818 u_int qindex;
819 enum i40e_queue_type type, last_type;
820 uint32_t lnklst_reg;
821 uint16_t rxq_map, txq_map, cur_queue, last_queue;
822
823 hw = &pf->hw;
824
825 rxq_map = vector->rxq_map;
826 txq_map = vector->txq_map;
827
828 last_queue = IXL_END_OF_INTR_LNKLST;
829 last_type = I40E_QUEUE_TYPE_RX;
830
831 /*
 832	 * The datasheet says that, to optimize performance, RX and TX queues
833 * should be interleaved in the interrupt linked list, so we process
834 * both at once here.
835 */
836 while ((rxq_map != 0) || (txq_map != 0)) {
837 if (txq_map != 0) {
838 qindex = ffs(txq_map) - 1;
839 type = I40E_QUEUE_TYPE_TX;
840 cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
841 ixl_vf_set_qctl(pf, vector, type, cur_queue,
842 &last_type, &last_queue);
843 txq_map &= ~(1 << qindex);
844 }
845
846 if (rxq_map != 0) {
847 qindex = ffs(rxq_map) - 1;
848 type = I40E_QUEUE_TYPE_RX;
849 cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
850 ixl_vf_set_qctl(pf, vector, type, cur_queue,
851 &last_type, &last_queue);
852 rxq_map &= ~(1 << qindex);
853 }
854 }
855
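	/*
	 * The list was threaded back to front: each queue's cause-control
	 * register points at the queue programmed before it, so the vector's
	 * LNKLST register is pointed at the queue handled last, which is the
	 * head of the list.
	 */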
856 if (vector->vector_id == 0)
857 lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
858 else
859 lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
860 vf->vf_num);
861 wr32(hw, lnklst_reg,
862 (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
863 (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
864
865 ixl_flush(hw);
866}
867
868static void
869ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
870 uint16_t msg_size)
871{
872 struct i40e_virtchnl_irq_map_info *map;
873 struct i40e_virtchnl_vector_map *vector;
874 struct i40e_hw *hw;
875 int i, largest_txq, largest_rxq;
876
877 hw = &pf->hw;
878
879 if (msg_size < sizeof(*map)) {
880 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
881 I40E_ERR_PARAM);
882 return;
883 }
884
885 map = msg;
886 if (map->num_vectors == 0) {
887 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
888 I40E_ERR_PARAM);
889 return;
890 }
891
892 if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
893 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
894 I40E_ERR_PARAM);
895 return;
896 }
897
898 for (i = 0; i < map->num_vectors; i++) {
899 vector = &map->vecmap[i];
900
901 if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
902 vector->vsi_id != vf->vsi.vsi_num) {
903 i40e_send_vf_nack(pf, vf,
904 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
905 return;
906 }
907
908 if (vector->rxq_map != 0) {
909 largest_rxq = fls(vector->rxq_map) - 1;
910 if (largest_rxq >= vf->vsi.num_queues) {
911 i40e_send_vf_nack(pf, vf,
912 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
913 I40E_ERR_PARAM);
914 return;
915 }
916 }
917
918 if (vector->txq_map != 0) {
919 largest_txq = fls(vector->txq_map) - 1;
920 if (largest_txq >= vf->vsi.num_queues) {
921 i40e_send_vf_nack(pf, vf,
922 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
923 I40E_ERR_PARAM);
924 return;
925 }
926 }
927
928 if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
929 vector->txitr_idx > IXL_MAX_ITR_IDX) {
930 i40e_send_vf_nack(pf, vf,
931 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
932 I40E_ERR_PARAM);
933 return;
934 }
935
936 ixl_vf_config_vector(pf, vf, vector);
937 }
938
939 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
940}
941
942static void
943ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
944 uint16_t msg_size)
945{
946 struct i40e_virtchnl_queue_select *select;
947 int error = 0;
948
949 if (msg_size != sizeof(*select)) {
950 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
951 I40E_ERR_PARAM);
952 return;
953 }
954
955 select = msg;
956 if (select->vsi_id != vf->vsi.vsi_num ||
957 select->rx_queues == 0 || select->tx_queues == 0) {
958 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
959 I40E_ERR_PARAM);
960 return;
961 }
962
963 /* Enable TX rings selected by the VF */
964 for (int i = 0; i < 32; i++) {
965 if ((1 << i) & select->tx_queues) {
966 /* Warn if queue is out of VF allocation range */
967 if (i >= vf->vsi.num_queues) {
968 device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
969 vf->vf_num, i);
970 break;
971 }
972 /* Skip this queue if it hasn't been configured */
973 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
974 continue;
975 /* Warn if this queue is already marked as enabled */
976 if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
977 device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n",
978 vf->vf_num, i);
979
980 error = ixl_enable_tx_ring(pf, &vf->qtag, i);
981 if (error)
982 break;
983 else
984 ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
985 }
986 }
987
988 /* Enable RX rings selected by the VF */
989 for (int i = 0; i < 32; i++) {
990 if ((1 << i) & select->rx_queues) {
991 /* Warn if queue is out of VF allocation range */
992 if (i >= vf->vsi.num_queues) {
993 device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
994 vf->vf_num, i);
995 break;
996 }
997 /* Skip this queue if it hasn't been configured */
998 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
999 continue;
1000 /* Warn if this queue is already marked as enabled */
1001 if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
1002 device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n",
1003 vf->vf_num, i);
1004 error = ixl_enable_rx_ring(pf, &vf->qtag, i);
1005 if (error)
1006 break;
1007 else
1008 ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
1009 }
1010 }
1011
1012 if (error) {
1013 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
1014 I40E_ERR_TIMEOUT);
1015 return;
1016 }
1017
1018 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
1019}
1020
1021static void
1022ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1023 void *msg, uint16_t msg_size)
1024{
1025 struct i40e_virtchnl_queue_select *select;
1026 int error = 0;
1027
1028 if (msg_size != sizeof(*select)) {
1029 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1030 I40E_ERR_PARAM);
1031 return;
1032 }
1033
1034 select = msg;
1035 if (select->vsi_id != vf->vsi.vsi_num ||
1036 select->rx_queues == 0 || select->tx_queues == 0) {
1037 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1038 I40E_ERR_PARAM);
1039 return;
1040 }
1041
1042 /* Disable TX rings selected by the VF */
1043 for (int i = 0; i < 32; i++) {
1044 if ((1 << i) & select->tx_queues) {
1045 /* Warn if queue is out of VF allocation range */
1046 if (i >= vf->vsi.num_queues) {
1047 device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
1048 vf->vf_num, i);
1049 break;
1050 }
1051 /* Skip this queue if it hasn't been configured */
1052 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
1053 continue;
1054 /* Warn if this queue is already marked as disabled */
1055 if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
1056 device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n",
1057 vf->vf_num, i);
1058 continue;
1059 }
1060 error = ixl_disable_tx_ring(pf, &vf->qtag, i);
1061 if (error)
1062 break;
1063 else
1064 ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
1065 }
1066 }
1067
 1068			/* Disable RX rings selected by the VF */
1069 for (int i = 0; i < 32; i++) {
1070 if ((1 << i) & select->rx_queues) {
1071 /* Warn if queue is out of VF allocation range */
1072 if (i >= vf->vsi.num_queues) {
1073 device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
1074 vf->vf_num, i);
1075 break;
1076 }
1077 /* Skip this queue if it hasn't been configured */
1078 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
1079 continue;
1080 /* Warn if this queue is already marked as disabled */
1081 if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
1082 device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n",
1083 vf->vf_num, i);
1084 continue;
1085 }
1086 error = ixl_disable_rx_ring(pf, &vf->qtag, i);
1087 if (error)
1088 break;
1089 else
1090 ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
1091 }
1092 }
1093
1094 if (error) {
1095 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1096 I40E_ERR_TIMEOUT);
1097 return;
1098 }
1099
1100 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
1101}
1102
1103static bool
1104ixl_zero_mac(const uint8_t *addr)
1105{
1106 uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
1107
1108 return (cmp_etheraddr(addr, zero));
1109}
1110
1111static bool
1112ixl_bcast_mac(const uint8_t *addr)
1113{
1114
1115 return (cmp_etheraddr(addr, ixl_bcast_addr));
1116}
1117
1118static int
1119ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
1120{
1121
1122 if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
1123 return (EINVAL);
1124
1125 /*
1126 * If the VF is not allowed to change its MAC address, don't let it
1127 * set a MAC filter for an address that is not a multicast address and
1128 * is not its assigned MAC.
1129 */
1130 if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
1131 !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
1132 return (EPERM);
1133
1134 return (0);
1135}
1136
1137static void
1138ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1139 uint16_t msg_size)
1140{
1141 struct i40e_virtchnl_ether_addr_list *addr_list;
1142 struct i40e_virtchnl_ether_addr *addr;
1143 struct ixl_vsi *vsi;
1144 int i;
1145 size_t expected_size;
1146
1147 vsi = &vf->vsi;
1148
1149 if (msg_size < sizeof(*addr_list)) {
1150 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1151 I40E_ERR_PARAM);
1152 return;
1153 }
1154
1155 addr_list = msg;
1156 expected_size = sizeof(*addr_list) +
1157 addr_list->num_elements * sizeof(*addr);
1158
1159 if (addr_list->num_elements == 0 ||
1160 addr_list->vsi_id != vsi->vsi_num ||
1161 msg_size != expected_size) {
1162 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1163 I40E_ERR_PARAM);
1164 return;
1165 }
1166
1167 for (i = 0; i < addr_list->num_elements; i++) {
1168 if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
1169 i40e_send_vf_nack(pf, vf,
1170 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
1171 return;
1172 }
1173 }
1174
1175 for (i = 0; i < addr_list->num_elements; i++) {
1176 addr = &addr_list->list[i];
1177 ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
1178 }
1179
1180 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
1181}
1182
1183static void
1184ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1185 uint16_t msg_size)
1186{
1187 struct i40e_virtchnl_ether_addr_list *addr_list;
1188 struct i40e_virtchnl_ether_addr *addr;
1189 size_t expected_size;
1190 int i;
1191
1192 if (msg_size < sizeof(*addr_list)) {
1193 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1194 I40E_ERR_PARAM);
1195 return;
1196 }
1197
1198 addr_list = msg;
1199 expected_size = sizeof(*addr_list) +
1200 addr_list->num_elements * sizeof(*addr);
1201
1202 if (addr_list->num_elements == 0 ||
1203 addr_list->vsi_id != vf->vsi.vsi_num ||
1204 msg_size != expected_size) {
1205 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1206 I40E_ERR_PARAM);
1207 return;
1208 }
1209
1210 for (i = 0; i < addr_list->num_elements; i++) {
1211 addr = &addr_list->list[i];
1212 if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
1213 i40e_send_vf_nack(pf, vf,
1214 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
1215 return;
1216 }
1217 }
1218
1219 for (i = 0; i < addr_list->num_elements; i++) {
1220 addr = &addr_list->list[i];
1221 ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
1222 }
1223
1224 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
1225}
1226
1227static enum i40e_status_code
1228ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
1229{
1230 struct i40e_vsi_context vsi_ctx;
1231
1232 vsi_ctx.seid = vf->vsi.seid;
1233
1234 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
1235 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
1236 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1237 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1238 return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
1239}
1240
1241static void
1242ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1243 uint16_t msg_size)
1244{
1245 struct i40e_virtchnl_vlan_filter_list *filter_list;
1246 enum i40e_status_code code;
1247 size_t expected_size;
1248 int i;
1249
1250 if (msg_size < sizeof(*filter_list)) {
1251 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1252 I40E_ERR_PARAM);
1253 return;
1254 }
1255
1256 filter_list = msg;
1257 expected_size = sizeof(*filter_list) +
1258 filter_list->num_elements * sizeof(uint16_t);
1259 if (filter_list->num_elements == 0 ||
1260 filter_list->vsi_id != vf->vsi.vsi_num ||
1261 msg_size != expected_size) {
1262 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1263 I40E_ERR_PARAM);
1264 return;
1265 }
1266
1267 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1268 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1269 I40E_ERR_PARAM);
1270 return;
1271 }
1272
1273 for (i = 0; i < filter_list->num_elements; i++) {
1274 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1275 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1276 I40E_ERR_PARAM);
1277 return;
1278 }
1279 }
1280
1281 code = ixl_vf_enable_vlan_strip(pf, vf);
1282 if (code != I40E_SUCCESS) {
1283 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1284 I40E_ERR_PARAM);
1285 }
1286
1287 for (i = 0; i < filter_list->num_elements; i++)
1288 ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1289
1290 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
1291}
1292
1293static void
1294ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1295 uint16_t msg_size)
1296{
1297 struct i40e_virtchnl_vlan_filter_list *filter_list;
1298 int i;
1299 size_t expected_size;
1300
1301 if (msg_size < sizeof(*filter_list)) {
1302 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1303 I40E_ERR_PARAM);
1304 return;
1305 }
1306
1307 filter_list = msg;
1308 expected_size = sizeof(*filter_list) +
1309 filter_list->num_elements * sizeof(uint16_t);
1310 if (filter_list->num_elements == 0 ||
1311 filter_list->vsi_id != vf->vsi.vsi_num ||
1312 msg_size != expected_size) {
1313 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1314 I40E_ERR_PARAM);
1315 return;
1316 }
1317
1318 for (i = 0; i < filter_list->num_elements; i++) {
1319 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1320 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1321 I40E_ERR_PARAM);
1322 return;
1323 }
1324 }
1325
1326 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1327 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1328 I40E_ERR_PARAM);
1329 return;
1330 }
1331
1332 for (i = 0; i < filter_list->num_elements; i++)
1333 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1334
1335 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
1336}
1337
1338static void
1339ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1340 void *msg, uint16_t msg_size)
1341{
1342 struct i40e_virtchnl_promisc_info *info;
1343 enum i40e_status_code code;
1344
1345 if (msg_size != sizeof(*info)) {
1346 i40e_send_vf_nack(pf, vf,
1347 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1348 return;
1349 }
1350
1351 if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
1352 i40e_send_vf_nack(pf, vf,
1353 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1354 return;
1355 }
1356
1357 info = msg;
1358 if (info->vsi_id != vf->vsi.vsi_num) {
1359 i40e_send_vf_nack(pf, vf,
1360 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1361 return;
1362 }
1363
1364 code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
1365 info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
1366 if (code != I40E_SUCCESS) {
1367 i40e_send_vf_nack(pf, vf,
1368 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
1369 return;
1370 }
1371
1372 code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
1373 info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
1374 if (code != I40E_SUCCESS) {
1375 i40e_send_vf_nack(pf, vf,
1376 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
1377 return;
1378 }
1379
1380 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
1381}
1382
1383static void
1384ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1385 uint16_t msg_size)
1386{
1387 struct i40e_virtchnl_queue_select *queue;
1388
1389 if (msg_size != sizeof(*queue)) {
1390 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
1391 I40E_ERR_PARAM);
1392 return;
1393 }
1394
1395 queue = msg;
1396 if (queue->vsi_id != vf->vsi.vsi_num) {
1397 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
1398 I40E_ERR_PARAM);
1399 return;
1400 }
1401
1402 ixl_update_eth_stats(&vf->vsi);
1403
1404 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
1405 I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
1406}
1407
1408static void
1409ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1410 uint16_t msg_size)
1411{
1412 struct i40e_hw *hw;
1413 struct i40e_virtchnl_rss_key *key;
1414 struct i40e_aqc_get_set_rss_key_data key_data;
1415 enum i40e_status_code status;
1416
1417 hw = &pf->hw;
1418
1419 if (msg_size < sizeof(*key)) {
1420 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1421 I40E_ERR_PARAM);
1422 return;
1423 }
1424
1425 key = msg;
1426
1427 if (key->key_len > 52) {
1428 device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
1429 vf->vf_num, key->key_len, 52);
1430 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1431 I40E_ERR_PARAM);
1432 return;
1433 }
1434
1435 if (key->vsi_id != vf->vsi.vsi_num) {
1436 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1437 vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
1438 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1439 I40E_ERR_PARAM);
1440 return;
1441 }
1442
1443 /* Fill out hash using MAC-dependent method */
1444 if (hw->mac.type == I40E_MAC_X722) {
1445 bzero(&key_data, sizeof(key_data));
1446 if (key->key_len <= 40)
1447 bcopy(key->key, key_data.standard_rss_key, key->key_len);
1448 else {
1449 bcopy(key->key, key_data.standard_rss_key, 40);
1450 bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
1451 }
1452 status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
1453 if (status) {
1454 device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
1455 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1456 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1457 I40E_ERR_ADMIN_QUEUE_ERROR);
1458 return;
1459 }
1460 } else {
1461 for (int i = 0; i < (key->key_len / 4); i++)
1462 i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
1463 }
1464
1465 DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
1466 vf->vf_num, key->key[0]);
1467
1468 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY);
1469}
1470
1471static void
1472ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1473 uint16_t msg_size)
1474{
1475 struct i40e_hw *hw;
1476 struct i40e_virtchnl_rss_lut *lut;
1477 enum i40e_status_code status;
1478
1479 hw = &pf->hw;
1480
1481 if (msg_size < sizeof(*lut)) {
1482 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1483 I40E_ERR_PARAM);
1484 return;
1485 }
1486
1487 lut = msg;
1488
1489 if (lut->lut_entries > 64) {
1490 device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
1491 vf->vf_num, lut->lut_entries, 64);
1492 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1493 I40E_ERR_PARAM);
1494 return;
1495 }
1496
1497 if (lut->vsi_id != vf->vsi.vsi_num) {
1498 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1499 vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
1500 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1501 I40E_ERR_PARAM);
1502 return;
1503 }
1504
1505 /* Fill out LUT using MAC-dependent method */
1506 if (hw->mac.type == I40E_MAC_X722) {
1507 status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
1508 if (status) {
1509 device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1510 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1511 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1512 I40E_ERR_ADMIN_QUEUE_ERROR);
1513 return;
1514 }
1515 } else {
1516 for (int i = 0; i < (lut->lut_entries / 4); i++)
1517 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
1518 }
1519
1520 DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
1521 vf->vf_num, lut->lut[0], lut->lut_entries);
1522
1523 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT);
1524}
1525
1526static void
1527ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1528 uint16_t msg_size)
1529{
1530 struct i40e_hw *hw;
1531 struct i40e_virtchnl_rss_hena *hena;
1532
1533 hw = &pf->hw;
1534
1535 if (msg_size < sizeof(*hena)) {
1536 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
1537 I40E_ERR_PARAM);
1538 return;
1539 }
1540
1541 hena = msg;
1542
1543 /* Set HENA */
1544 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
1545 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
1546
1547 DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
1548 vf->vf_num, hena->hena);
1549
1550 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA);
1551}
1552
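/*
 * Dispatch a virtchnl message received on the PF admin receive queue to the
 * handler for its opcode, after validating that the originating VF is enabled.
 */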
1553void
1554ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
1555{
1556 struct ixl_vf *vf;
1557 void *msg;
1558 uint16_t vf_num, msg_size;
1559 uint32_t opcode;
1560
1561 vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
1562 opcode = le32toh(event->desc.cookie_high);
1563
1564 if (vf_num >= pf->num_vfs) {
1565 device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
1566 return;
1567 }
1568
1569 vf = &pf->vfs[vf_num];
1570 msg = event->msg_buf;
1571 msg_size = event->msg_len;
1572
1573 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
1574 "Got msg %s(%d) from%sVF-%d of size %d\n",
1575 ixl_vc_opcode_str(opcode), opcode,
1576 (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
1577 vf_num, msg_size);
1578
1579 /* This must be a stray msg from a previously destroyed VF. */
1580 if (!(vf->vf_flags & VF_FLAG_ENABLED))
1581 return;
1582
1583 switch (opcode) {
1584 case I40E_VIRTCHNL_OP_VERSION:
1585 ixl_vf_version_msg(pf, vf, msg, msg_size);
1586 break;
1587 case I40E_VIRTCHNL_OP_RESET_VF:
1588 ixl_vf_reset_msg(pf, vf, msg, msg_size);
1589 break;
1590 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
1591 ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
1592 break;
1593 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1594 ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
1595 break;
1596 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1597 ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
1598 break;
1599 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1600 ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
1601 break;
1602 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1603 ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
1604 break;
1605 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
1606 ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
1607 break;
1608 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
1609 ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
1610 break;
1611 case I40E_VIRTCHNL_OP_ADD_VLAN:
1612 ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
1613 break;
1614 case I40E_VIRTCHNL_OP_DEL_VLAN:
1615 ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
1616 break;
1617 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1618 ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
1619 break;
1620 case I40E_VIRTCHNL_OP_GET_STATS:
1621 ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
1622 break;
1623 case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
1624 ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
1625 break;
1626 case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
1627 ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
1628 break;
1629 case I40E_VIRTCHNL_OP_SET_RSS_HENA:
1630 ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
1631 break;
1632
1633 /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
1634 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
1635 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
1636 default:
1637 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
1638 break;
1639 }
1640}
1641
 1642/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
1643void
1644ixl_handle_vflr(void *arg, int pending)
1645{
1646 struct ixl_pf *pf;
1647 struct ixl_vf *vf;
1648 struct i40e_hw *hw;
1649 uint16_t global_vf_num;
1650 uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
1651 int i;
1652
1653 pf = arg;
1654 hw = &pf->hw;
1655
1656 IXL_PF_LOCK(pf);
1657 for (i = 0; i < pf->num_vfs; i++) {
1658 global_vf_num = hw->func_caps.vf_base_id + i;
1659
1660 vf = &pf->vfs[i];
1661 if (!(vf->vf_flags & VF_FLAG_ENABLED))
1662 continue;
1663
1664 vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
1665 vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
1666 vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
1667 if (vflrstat & vflrstat_mask) {
1668 wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
1669 vflrstat_mask);
1670
1671 ixl_reinit_vf(pf, vf);
1672 }
1673 }
1674
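	/* Re-enable the VFLR cause in PFINT_ICR0_ENA now that pending events are handled. */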
1675 icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
1676 icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
1677 wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
1678 ixl_flush(hw);
1679
1680 IXL_PF_UNLOCK(pf);
1681}
1682
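/* Map an admin queue error code onto the closest errno(2) value. */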
1683static int
1684ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
1685{
1686
1687 switch (err) {
1688 case I40E_AQ_RC_EPERM:
1689 return (EPERM);
1690 case I40E_AQ_RC_ENOENT:
1691 return (ENOENT);
1692 case I40E_AQ_RC_ESRCH:
1693 return (ESRCH);
1694 case I40E_AQ_RC_EINTR:
1695 return (EINTR);
1696 case I40E_AQ_RC_EIO:
1697 return (EIO);
1698 case I40E_AQ_RC_ENXIO:
1699 return (ENXIO);
1700 case I40E_AQ_RC_E2BIG:
1701 return (E2BIG);
1702 case I40E_AQ_RC_EAGAIN:
1703 return (EAGAIN);
1704 case I40E_AQ_RC_ENOMEM:
1705 return (ENOMEM);
1706 case I40E_AQ_RC_EACCES:
1707 return (EACCES);
1708 case I40E_AQ_RC_EFAULT:
1709 return (EFAULT);
1710 case I40E_AQ_RC_EBUSY:
1711 return (EBUSY);
1712 case I40E_AQ_RC_EEXIST:
1713 return (EEXIST);
1714 case I40E_AQ_RC_EINVAL:
1715 return (EINVAL);
1716 case I40E_AQ_RC_ENOTTY:
1717 return (ENOTTY);
1718 case I40E_AQ_RC_ENOSPC:
1719 return (ENOSPC);
1720 case I40E_AQ_RC_ENOSYS:
1721 return (ENOSYS);
1722 case I40E_AQ_RC_ERANGE:
1723 return (ERANGE);
1724 case I40E_AQ_RC_EFLUSHED:
1725 return (EINVAL); /* No exact equivalent in errno.h */
1726 case I40E_AQ_RC_BAD_ADDR:
1727 return (EFAULT);
1728 case I40E_AQ_RC_EMODE:
1729 return (EPERM);
1730 case I40E_AQ_RC_EFBIG:
1731 return (EFBIG);
1732 default:
1733 return (EINVAL);
1734 }
1735}
1736
1737int
1738ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
1739{
1740 struct ixl_pf *pf;
1741 struct i40e_hw *hw;
1742 struct ixl_vsi *pf_vsi;
1743 enum i40e_status_code ret;
1744 int i, error;
1745
1746 pf = device_get_softc(dev);
1747 hw = &pf->hw;
1748 pf_vsi = &pf->vsi;
1749
1750 IXL_PF_LOCK(pf);
1751 pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
1752 M_ZERO);
1753
1754 if (pf->vfs == NULL) {
1755 error = ENOMEM;
1756 goto fail;
1757 }
1758
1759 for (i = 0; i < num_vfs; i++)
1760 sysctl_ctx_init(&pf->vfs[i].ctx);
1761
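	/*
	 * Create a VEB switching element under the PF's uplink; the VF VSIs
	 * created later use this VEB's SEID as their uplink (see
	 * ixl_vf_alloc_vsi()).
	 */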
1762 ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
1763 1, FALSE, &pf->veb_seid, FALSE, NULL);
1764 if (ret != I40E_SUCCESS) {
1765 error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
1766 device_printf(dev, "add_veb failed; code=%d error=%d", ret,
1767 error);
1768 goto fail;
1769 }
1770
1771 ixl_enable_adminq(hw);
1772
1773 pf->num_vfs = num_vfs;
1774 IXL_PF_UNLOCK(pf);
1775 return (0);
1776
1777fail:
1778 free(pf->vfs, M_IXL);
1779 pf->vfs = NULL;
1780 IXL_PF_UNLOCK(pf);
1781 return (error);
1782}
1783
1784void
1785ixl_iov_uninit(device_t dev)
1786{
1787 struct ixl_pf *pf;
1788 struct i40e_hw *hw;
1789 struct ixl_vsi *vsi;
1790 struct ifnet *ifp;
1791 struct ixl_vf *vfs;
1792 int i, num_vfs;
1793
1794 pf = device_get_softc(dev);
1795 hw = &pf->hw;
1796 vsi = &pf->vsi;
1797 ifp = vsi->ifp;
1798
1799 IXL_PF_LOCK(pf);
1800 for (i = 0; i < pf->num_vfs; i++) {
1801 if (pf->vfs[i].vsi.seid != 0)
1802 i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
1803 ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
1804 DDPRINTF(dev, "VF %d: %d released\n",
1805 i, pf->vfs[i].qtag.num_allocated);
1806 DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1807 }
1808
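	/* Tear down the VEB once all of the VF VSIs are gone. */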
1809 if (pf->veb_seid != 0) {
1810 i40e_aq_delete_element(hw, pf->veb_seid, NULL);
1811 pf->veb_seid = 0;
1812 }
1813
1814 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
1815 ixl_disable_intr(vsi);
1816 ixl_flush(hw);
1817 }
1818
1819 vfs = pf->vfs;
1820 num_vfs = pf->num_vfs;
1821
1822 pf->vfs = NULL;
1823 pf->num_vfs = 0;
1824 IXL_PF_UNLOCK(pf);
1825
1826 /* Do this after the unlock as sysctl_ctx_free might sleep. */
1827 for (i = 0; i < num_vfs; i++)
1828 sysctl_ctx_free(&vfs[i].ctx);
1829 free(vfs, M_IXL);
1830}
1831
1832static int
1833ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
1834{
1835 device_t dev = pf->dev;
1836 int error;
1837
1838 /* Validate, and clamp value if invalid */
1839 if (num_queues < 1 || num_queues > 16)
1840 device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
1841 num_queues, vf->vf_num);
1842 if (num_queues < 1) {
1843 device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
1844 num_queues = 1;
1845 } else if (num_queues > 16) {
1846 device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num);
1847 num_queues = 16;
1848 }
1849 error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
1850 if (error) {
1851 device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
1852 num_queues, vf->vf_num);
1853 return (ENOSPC);
1854 }
1855
1856 DDPRINTF(dev, "VF %d: %d allocated, %d active",
1857 vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
1858 DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1859
1860 return (0);
1861}
1862
1863int
1864ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
1865{
1866 char sysctl_name[QUEUE_NAME_LEN];
1867 struct ixl_pf *pf;
1868 struct ixl_vf *vf;
1869 const void *mac;
1870 size_t size;
1871 int error;
1872 int vf_num_queues;
1873
1874 pf = device_get_softc(dev);
1875 vf = &pf->vfs[vfnum];
1876
1877 IXL_PF_LOCK(pf);
1878 vf->vf_num = vfnum;
1879
1880 vf->vsi.back = pf;
1881 vf->vf_flags = VF_FLAG_ENABLED;
1882 SLIST_INIT(&vf->vsi.ftl);
1883
1884 /* Reserve queue allocation from PF */
1885 vf_num_queues = nvlist_get_number(params, "num-queues");
1886 error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
1887 if (error != 0)
1888 goto out;
1889
1890 error = ixl_vf_setup_vsi(pf, vf);
1891 if (error != 0)
1892 goto out;
1893
1894 if (nvlist_exists_binary(params, "mac-addr")) {
1895 mac = nvlist_get_binary(params, "mac-addr", &size);
1896 bcopy(mac, vf->mac, ETHER_ADDR_LEN);
1897
1898 if (nvlist_get_bool(params, "allow-set-mac"))
1899 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1900 } else
1901 /*
1902 * If the administrator has not specified a MAC address then
1903 * we must allow the VF to choose one.
1904 */
1905 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1906
1907 if (nvlist_get_bool(params, "mac-anti-spoof"))
1908 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
1909
1910 if (nvlist_get_bool(params, "allow-promisc"))
1911 vf->vf_flags |= VF_FLAG_PROMISC_CAP;
1912
1913 vf->vf_flags |= VF_FLAG_VLAN_CAP;
1914
1915 ixl_reset_vf(pf, vf);
1916out:
1917 IXL_PF_UNLOCK(pf);
1918 if (error == 0) {
1919 snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
1920 ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
1921 }
1922
1923 return (error);
1924}
1925
34
35#include "ixl_pf_iov.h"
36
37/* Private functions */
38static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
39static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
40static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
41
42static bool ixl_zero_mac(const uint8_t *addr);
43static bool ixl_bcast_mac(const uint8_t *addr);
44
45static const char * ixl_vc_opcode_str(uint16_t op);
46static int ixl_vc_opcode_level(uint16_t opcode);
47
48static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
49
50static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
51static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
52static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
53static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
54static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
55static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
56static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
57static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
58static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
59static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
60static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
61static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
62static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
63static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
64static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_txq_info *info);
65static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_rxq_info *info);
66static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
67static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct i40e_virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
68 enum i40e_queue_type *last_type, uint16_t *last_queue);
69static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct i40e_virtchnl_vector_map *vector);
70static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
71static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
72static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
73static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
74static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
75static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
76static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
77static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
78static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
79static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
80static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
81
82static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
83
84void
85ixl_initialize_sriov(struct ixl_pf *pf)
86{
87 device_t dev = pf->dev;
88 struct i40e_hw *hw = &pf->hw;
89 nvlist_t *pf_schema, *vf_schema;
90 int iov_error;
91
92 /* SR-IOV is only supported when MSI-X is in use. */
93 if (pf->msix <= 1)
94 return;
95
96 pf_schema = pci_iov_schema_alloc_node();
97 vf_schema = pci_iov_schema_alloc_node();
98 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
99 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
100 IOV_SCHEMA_HASDEFAULT, TRUE);
101 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
102 IOV_SCHEMA_HASDEFAULT, FALSE);
103 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
104 IOV_SCHEMA_HASDEFAULT, FALSE);
105 pci_iov_schema_add_uint16(vf_schema, "num-queues",
106 IOV_SCHEMA_HASDEFAULT,
107 	    max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IXLV_MAX_QUEUES)));
108
109 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
110 if (iov_error != 0) {
111 device_printf(dev,
112 "Failed to initialize SR-IOV (error=%d)\n",
113 iov_error);
114 } else
115 device_printf(dev, "SR-IOV ready\n");
116
117 pf->vc_debug_lvl = 1;
118}
119
120/*
121 * Allocate the VSI for a VF.
122 */
123static int
124ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
125{
126 device_t dev;
127 struct i40e_hw *hw;
128 struct ixl_vsi *vsi;
129 struct i40e_vsi_context vsi_ctx;
130 int i;
131 enum i40e_status_code code;
132
133 hw = &pf->hw;
134 vsi = &pf->vsi;
135 dev = pf->dev;
136
137 vsi_ctx.pf_num = hw->pf_id;
138 vsi_ctx.uplink_seid = pf->veb_seid;
139 vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
140 vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
141 vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
142
143 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
144
145 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
146 vsi_ctx.info.switch_id = htole16(0);
147
148 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
149 vsi_ctx.info.sec_flags = 0;
150 if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
151 vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
152
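	/*
	 * Pass VLAN tags through untouched by default; stripping is enabled
	 * later only if the VF requests it (see ixl_vf_enable_vlan_strip()).
	 */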
153 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
154 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
155 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
156
157 vsi_ctx.info.valid_sections |=
158 htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
159 vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
160
161 /* ERJ: Only scattered allocation is supported for VFs right now */
162 for (i = 0; i < vf->qtag.num_active; i++)
163 vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
164 for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
165 vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
166
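	/*
	 * bsrl() returns the index of the highest set bit, i.e.
	 * floor(log2(num_allocated)), so the TC queue count below is
	 * expressed as a power-of-two exponent.
	 */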
167 vsi_ctx.info.tc_mapping[0] = htole16(
168 (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
169 (bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
170
171 code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
172 if (code != I40E_SUCCESS)
173 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
174 vf->vsi.seid = vsi_ctx.seid;
175 vf->vsi.vsi_num = vsi_ctx.vsi_number;
176 // vf->vsi.first_queue = vf->qtag.qidx[0];
177 vf->vsi.num_queues = vf->qtag.num_active;
178
179 code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
180 if (code != I40E_SUCCESS)
181 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
182
183 code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
184 if (code != I40E_SUCCESS) {
185 device_printf(dev, "Failed to disable BW limit: %d\n",
186 ixl_adminq_err_to_errno(hw->aq.asq_last_status));
187 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
188 }
189
190 memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
191 return (0);
192}
193
194static int
195ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
196{
197 struct i40e_hw *hw;
198 int error;
199
200 hw = &pf->hw;
201
202 error = ixl_vf_alloc_vsi(pf, vf);
203 if (error != 0)
204 return (error);
205
206 vf->vsi.hw_filters_add = 0;
207 vf->vsi.hw_filters_del = 0;
208 ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
209 ixl_reconfigure_filters(&vf->vsi);
210
211 return (0);
212}
213
214static void
215ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
216 uint32_t val)
217{
218 uint32_t qtable;
219 int index, shift;
220
221 /*
222 * Two queues are mapped in a single register, so we have to do some
223 * gymnastics to convert the queue number into a register index and
224 * shift.
225 */
226 index = qnum / 2;
227 shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
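	/*
	 * e.g. queue 5 -> register index 2 with the odd-queue shift;
	 * queue 4 shares index 2 at shift 0.
	 */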
228
229 qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
230 qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
231 qtable |= val << shift;
232 i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
233}
234
235static void
236ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
237{
238 struct i40e_hw *hw;
239 uint32_t qtable;
240 int i;
241
242 hw = &pf->hw;
243
244 /*
245 * Contiguous mappings aren't actually supported by the hardware,
246 * so we have to use non-contiguous mappings.
247 */
248 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
249 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
250
251 /* Enable LAN traffic on this VF */
252 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
253 I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
254
255 /* Program index of each VF queue into PF queue space
256 * (This is only needed if QTABLE is enabled) */
257 for (i = 0; i < vf->vsi.num_queues; i++) {
258 qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
259 I40E_VPLAN_QTABLE_QINDEX_SHIFT;
260
261 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
262 }
263 for (; i < IXL_MAX_VSI_QUEUES; i++)
264 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
265 I40E_VPLAN_QTABLE_QINDEX_MASK);
266
267 /* Map queues allocated to VF to its VSI;
268 * This mapping matches the VF-wide mapping since the VF
269 * is only given a single VSI */
270 for (i = 0; i < vf->vsi.num_queues; i++)
271 ixl_vf_map_vsi_queue(hw, vf, i,
272 ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
273
274 /* Set rest of VSI queues as unused. */
275 for (; i < IXL_MAX_VSI_QUEUES; i++)
276 ixl_vf_map_vsi_queue(hw, vf, i,
277 I40E_VSILAN_QTABLE_QINDEX_0_MASK);
278
279 ixl_flush(hw);
280}
281
282static void
283ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
284{
285 struct i40e_hw *hw;
286
287 hw = &pf->hw;
288
289 if (vsi->seid == 0)
290 return;
291
292 i40e_aq_delete_element(hw, vsi->seid, NULL);
293}
294
295static void
296ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
297{
298
299 wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
300 ixl_flush(hw);
301}
302
303static void
304ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
305{
306
307 wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
308 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
309 ixl_flush(hw);
310}
311
312static void
313ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
314{
315 struct i40e_hw *hw;
316 uint32_t vfint_reg, vpint_reg;
317 int i;
318
319 hw = &pf->hw;
320
321 ixl_vf_vsi_release(pf, &vf->vsi);
322
323 /* Index 0 has a special register. */
324 ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
325
326 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
327 		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
328 ixl_vf_disable_queue_intr(hw, vfint_reg);
329 }
330
331 /* Index 0 has a special register. */
332 ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
333
334 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
335 vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
336 ixl_vf_unregister_intr(hw, vpint_reg);
337 }
338
339 vf->vsi.num_queues = 0;
340}
341
342static int
343ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
344{
345 struct i40e_hw *hw;
346 int i;
347 uint16_t global_vf_num;
348 uint32_t ciad;
349
350 hw = &pf->hw;
351 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
352
353 wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
354 (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
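	/*
	 * Poll the CIAD window until the VF reports no pending PCIe
	 * transactions, or give up after IXL_VF_RESET_TIMEOUT attempts.
	 */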
355 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
356 ciad = rd32(hw, I40E_PF_PCI_CIAD);
357 if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
358 return (0);
359 DELAY(1);
360 }
361
362 return (ETIMEDOUT);
363}
364
365static void
366ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
367{
368 struct i40e_hw *hw;
369 uint32_t vfrtrig;
370
371 hw = &pf->hw;
372
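	/*
	 * Assert the VF software reset bit; ixl_reinit_vf() then waits for
	 * the reset to complete and rebuilds the VF's resources.
	 */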
373 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
374 vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
375 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
376 ixl_flush(hw);
377
378 ixl_reinit_vf(pf, vf);
379}
380
381static void
382ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
383{
384 struct i40e_hw *hw;
385 uint32_t vfrstat, vfrtrig;
386 int i, error;
387
388 hw = &pf->hw;
389
390 error = ixl_flush_pcie(pf, vf);
391 if (error != 0)
392 device_printf(pf->dev,
393 "Timed out waiting for PCIe activity to stop on VF-%d\n",
394 vf->vf_num);
395
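	/* Wait for the hardware to indicate that the VF reset has completed. */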
396 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
397 DELAY(10);
398
399 vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
400 if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
401 break;
402 }
403
404 if (i == IXL_VF_RESET_TIMEOUT)
405 device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
406
407 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
408
409 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
410 vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
411 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
412
413 if (vf->vsi.seid != 0)
414 ixl_disable_rings(&vf->vsi);
415
416 ixl_vf_release_resources(pf, vf);
417 ixl_vf_setup_vsi(pf, vf);
418 ixl_vf_map_queues(pf, vf);
419
420 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
421 ixl_flush(hw);
422}
423
424static const char *
425ixl_vc_opcode_str(uint16_t op)
426{
427
428 switch (op) {
429 case I40E_VIRTCHNL_OP_VERSION:
430 return ("VERSION");
431 case I40E_VIRTCHNL_OP_RESET_VF:
432 return ("RESET_VF");
433 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
434 return ("GET_VF_RESOURCES");
435 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
436 return ("CONFIG_TX_QUEUE");
437 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
438 return ("CONFIG_RX_QUEUE");
439 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
440 return ("CONFIG_VSI_QUEUES");
441 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
442 return ("CONFIG_IRQ_MAP");
443 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
444 return ("ENABLE_QUEUES");
445 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
446 return ("DISABLE_QUEUES");
447 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
448 return ("ADD_ETHER_ADDRESS");
449 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
450 return ("DEL_ETHER_ADDRESS");
451 case I40E_VIRTCHNL_OP_ADD_VLAN:
452 return ("ADD_VLAN");
453 case I40E_VIRTCHNL_OP_DEL_VLAN:
454 return ("DEL_VLAN");
455 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
456 return ("CONFIG_PROMISCUOUS_MODE");
457 case I40E_VIRTCHNL_OP_GET_STATS:
458 return ("GET_STATS");
459 case I40E_VIRTCHNL_OP_FCOE:
460 return ("FCOE");
461 case I40E_VIRTCHNL_OP_EVENT:
462 return ("EVENT");
463 case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
464 return ("CONFIG_RSS_KEY");
465 case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
466 return ("CONFIG_RSS_LUT");
467 case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
468 return ("GET_RSS_HENA_CAPS");
469 case I40E_VIRTCHNL_OP_SET_RSS_HENA:
470 return ("SET_RSS_HENA");
471 default:
472 return ("UNKNOWN");
473 }
474}
475
476static int
477ixl_vc_opcode_level(uint16_t opcode)
478{
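	/*
	 * GET_STATS requires a higher debug level to be logged than the
	 * other opcodes, since VFs typically poll it frequently.
	 */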
479 switch (opcode) {
480 case I40E_VIRTCHNL_OP_GET_STATS:
481 return (10);
482 default:
483 return (5);
484 }
485}
486
487static void
488ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
489 enum i40e_status_code status, void *msg, uint16_t len)
490{
491 struct i40e_hw *hw;
492 int global_vf_id;
493
494 hw = &pf->hw;
495 global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
496
497 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
498 "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
499 ixl_vc_opcode_str(op), op, status, vf->vf_num);
500
501 i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
502}
503
504static void
505ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
506{
507
508 ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
509}
510
511static void
512ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
513 enum i40e_status_code status, const char *file, int line)
514{
515
516 I40E_VC_DEBUG(pf, 1,
517 "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
518 ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
519 status, vf->vf_num, file, line);
520 ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
521}
522
523static void
524ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
525 uint16_t msg_size)
526{
527 struct i40e_virtchnl_version_info reply;
528
529 if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
530 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
531 I40E_ERR_PARAM);
532 return;
533 }
534
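	/*
	 * Remember the VF's minor API version; it determines how
	 * GET_VF_RESOURCES is validated and answered.
	 */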
535 vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;
536
537 reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
538 reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
539 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
540 sizeof(reply));
541}
542
543static void
544ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
545 uint16_t msg_size)
546{
547
548 if (msg_size != 0) {
549 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
550 I40E_ERR_PARAM);
551 return;
552 }
553
554 ixl_reset_vf(pf, vf);
555
556 /* No response to a reset message. */
557}
558
559static void
560ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
561 uint16_t msg_size)
562{
563 struct i40e_virtchnl_vf_resource reply;
564
565 if ((vf->version == 0 && msg_size != 0) ||
566 (vf->version == 1 && msg_size != 4)) {
567 		device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size"
568 " for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
569 vf->version);
570 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
571 I40E_ERR_PARAM);
572 return;
573 }
574
575 bzero(&reply, sizeof(reply));
576
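	/*
	 * 1.0 VFs don't send capability flags, so they get the legacy
	 * L2/register-RSS/VLAN set; 1.1+ VFs get their requested flags
	 * masked down to L2, PF-driven RSS, and VLAN.
	 */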
577 if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
578 reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
579 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
580 I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
581 else
582 /* Force VF RSS setup by PF in 1.1+ VFs */
583 reply.vf_offload_flags = *(u32 *)msg & (
584 I40E_VIRTCHNL_VF_OFFLOAD_L2 |
585 I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
586 I40E_VIRTCHNL_VF_OFFLOAD_VLAN);
587
588 reply.num_vsis = 1;
589 reply.num_queue_pairs = vf->vsi.num_queues;
590 reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
591 reply.rss_key_size = 52;
592 reply.rss_lut_size = 64;
593 reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
594 reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
595 reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
596 memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
597
598 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
599 I40E_SUCCESS, &reply, sizeof(reply));
600}
601
602static int
603ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
604 struct i40e_virtchnl_txq_info *info)
605{
606 struct i40e_hw *hw;
607 struct i40e_hmc_obj_txq txq;
608 uint16_t global_queue_num, global_vf_num;
609 enum i40e_status_code status;
610 uint32_t qtx_ctl;
611
612 hw = &pf->hw;
613 global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
614 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
615 bzero(&txq, sizeof(txq));
616
617 DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
618 vf->vf_num, global_queue_num, info->queue_id, global_vf_num);
619
620 status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
621 if (status != I40E_SUCCESS)
622 return (EINVAL);
623
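	/*
	 * The HMC context takes the ring base address in
	 * IXL_TX_CTX_BASE_UNITS-sized units rather than as a raw bus address.
	 */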
624 txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
625
626 txq.head_wb_ena = info->headwb_enabled;
627 txq.head_wb_addr = info->dma_headwb_addr;
628 txq.qlen = info->ring_len;
629 txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
630 txq.rdylist_act = 0;
631
632 status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
633 if (status != I40E_SUCCESS)
634 return (EINVAL);
635
636 qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
637 (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
638 (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
639 wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
640 ixl_flush(hw);
641
642 ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
643
644 return (0);
645}
646
647static int
648ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
649 struct i40e_virtchnl_rxq_info *info)
650{
651 struct i40e_hw *hw;
652 struct i40e_hmc_obj_rxq rxq;
653 uint16_t global_queue_num;
654 enum i40e_status_code status;
655
656 hw = &pf->hw;
657 global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
658 bzero(&rxq, sizeof(rxq));
659
660 DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
661 vf->vf_num, global_queue_num, info->queue_id);
662
663 if (info->databuffer_size > IXL_VF_MAX_BUFFER)
664 return (EINVAL);
665
666 if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
667 info->max_pkt_size < ETHER_MIN_LEN)
668 return (EINVAL);
669
670 if (info->splithdr_enabled) {
671 if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
672 return (EINVAL);
673
674 rxq.hsplit_0 = info->rx_split_pos &
675 (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
676 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
677 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
678 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
679 rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
680
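		/* Descriptor type 2 selects header-split descriptors. */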
681 rxq.dtype = 2;
682 }
683
684 status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
685 if (status != I40E_SUCCESS)
686 return (EINVAL);
687
688 rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
689 rxq.qlen = info->ring_len;
690
691 rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
692
693 rxq.dsize = 1;
694 rxq.crcstrip = 1;
695 rxq.l2tsel = 1;
696
697 rxq.rxmax = info->max_pkt_size;
698 rxq.tphrdesc_ena = 1;
699 rxq.tphwdesc_ena = 1;
700 rxq.tphdata_ena = 1;
701 rxq.tphhead_ena = 1;
702 rxq.lrxqthresh = 2;
703 rxq.prefena = 1;
704
705 status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
706 if (status != I40E_SUCCESS)
707 return (EINVAL);
708
709 ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
710
711 return (0);
712}
713
714static void
715ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
716 uint16_t msg_size)
717{
718 struct i40e_virtchnl_vsi_queue_config_info *info;
719 struct i40e_virtchnl_queue_pair_info *pair;
720 uint16_t expected_msg_size;
721 int i;
722
723 if (msg_size < sizeof(*info)) {
724 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
725 I40E_ERR_PARAM);
726 return;
727 }
728
729 info = msg;
730 if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
731 device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
732 vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
733 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
734 I40E_ERR_PARAM);
735 return;
736 }
737
738 expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
739 if (msg_size != expected_msg_size) {
740 device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
741 vf->vf_num, msg_size, expected_msg_size);
742 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
743 I40E_ERR_PARAM);
744 return;
745 }
746
747 if (info->vsi_id != vf->vsi.vsi_num) {
748 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
749 vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
750 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
751 I40E_ERR_PARAM);
752 return;
753 }
754
755 for (i = 0; i < info->num_queue_pairs; i++) {
756 pair = &info->qpair[i];
757
758 if (pair->txq.vsi_id != vf->vsi.vsi_num ||
759 pair->rxq.vsi_id != vf->vsi.vsi_num ||
760 pair->txq.queue_id != pair->rxq.queue_id ||
761 pair->txq.queue_id >= vf->vsi.num_queues) {
762
763 i40e_send_vf_nack(pf, vf,
764 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
765 return;
766 }
767
768 if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
769 i40e_send_vf_nack(pf, vf,
770 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
771 return;
772 }
773
774 if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
775 i40e_send_vf_nack(pf, vf,
776 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
777 return;
778 }
779 }
780
781 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
782}
783
784static void
785ixl_vf_set_qctl(struct ixl_pf *pf,
786 const struct i40e_virtchnl_vector_map *vector,
787 enum i40e_queue_type cur_type, uint16_t cur_queue,
788 enum i40e_queue_type *last_type, uint16_t *last_queue)
789{
790 uint32_t offset, qctl;
791 uint16_t itr_indx;
792
793 if (cur_type == I40E_QUEUE_TYPE_RX) {
794 offset = I40E_QINT_RQCTL(cur_queue);
795 itr_indx = vector->rxitr_idx;
796 } else {
797 offset = I40E_QINT_TQCTL(cur_queue);
798 itr_indx = vector->txitr_idx;
799 }
800
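	/*
	 * Chain this queue into the interrupt linked list: its NEXTQ fields
	 * point at the queue programmed by the previous call.
	 */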
801 qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
802 (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
803 (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
804 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
805 (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
806
807 wr32(&pf->hw, offset, qctl);
808
809 *last_type = cur_type;
810 *last_queue = cur_queue;
811}
812
813static void
814ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
815 const struct i40e_virtchnl_vector_map *vector)
816{
817 struct i40e_hw *hw;
818 u_int qindex;
819 enum i40e_queue_type type, last_type;
820 uint32_t lnklst_reg;
821 uint16_t rxq_map, txq_map, cur_queue, last_queue;
822
823 hw = &pf->hw;
824
825 rxq_map = vector->rxq_map;
826 txq_map = vector->txq_map;
827
828 last_queue = IXL_END_OF_INTR_LNKLST;
829 last_type = I40E_QUEUE_TYPE_RX;
830
831 /*
832 	 * The datasheet says that, to optimize performance, RX and TX queues
833 * should be interleaved in the interrupt linked list, so we process
834 * both at once here.
835 */
836 while ((rxq_map != 0) || (txq_map != 0)) {
837 if (txq_map != 0) {
838 qindex = ffs(txq_map) - 1;
839 type = I40E_QUEUE_TYPE_TX;
840 cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
841 ixl_vf_set_qctl(pf, vector, type, cur_queue,
842 &last_type, &last_queue);
843 txq_map &= ~(1 << qindex);
844 }
845
846 if (rxq_map != 0) {
847 qindex = ffs(rxq_map) - 1;
848 type = I40E_QUEUE_TYPE_RX;
849 cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
850 ixl_vf_set_qctl(pf, vector, type, cur_queue,
851 &last_type, &last_queue);
852 rxq_map &= ~(1 << qindex);
853 }
854 }
855
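	/*
	 * Point the vector's linked-list head register at the last queue
	 * programmed above; vector 0 uses a dedicated register.
	 */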
856 if (vector->vector_id == 0)
857 lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
858 else
859 lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
860 vf->vf_num);
861 wr32(hw, lnklst_reg,
862 (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
863 (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
864
865 ixl_flush(hw);
866}
867
868static void
869ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
870 uint16_t msg_size)
871{
872 struct i40e_virtchnl_irq_map_info *map;
873 struct i40e_virtchnl_vector_map *vector;
874 struct i40e_hw *hw;
875 int i, largest_txq, largest_rxq;
876
877 hw = &pf->hw;
878
879 if (msg_size < sizeof(*map)) {
880 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
881 I40E_ERR_PARAM);
882 return;
883 }
884
885 map = msg;
886 if (map->num_vectors == 0) {
887 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
888 I40E_ERR_PARAM);
889 return;
890 }
891
892 if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
893 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
894 I40E_ERR_PARAM);
895 return;
896 }
897
898 for (i = 0; i < map->num_vectors; i++) {
899 vector = &map->vecmap[i];
900
901 if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
902 vector->vsi_id != vf->vsi.vsi_num) {
903 i40e_send_vf_nack(pf, vf,
904 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
905 return;
906 }
907
908 if (vector->rxq_map != 0) {
909 largest_rxq = fls(vector->rxq_map) - 1;
910 if (largest_rxq >= vf->vsi.num_queues) {
911 i40e_send_vf_nack(pf, vf,
912 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
913 I40E_ERR_PARAM);
914 return;
915 }
916 }
917
918 if (vector->txq_map != 0) {
919 largest_txq = fls(vector->txq_map) - 1;
920 if (largest_txq >= vf->vsi.num_queues) {
921 i40e_send_vf_nack(pf, vf,
922 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
923 I40E_ERR_PARAM);
924 return;
925 }
926 }
927
928 if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
929 vector->txitr_idx > IXL_MAX_ITR_IDX) {
930 i40e_send_vf_nack(pf, vf,
931 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
932 I40E_ERR_PARAM);
933 return;
934 }
935
936 ixl_vf_config_vector(pf, vf, vector);
937 }
938
939 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
940}
941
942static void
943ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
944 uint16_t msg_size)
945{
946 struct i40e_virtchnl_queue_select *select;
947 int error = 0;
948
949 if (msg_size != sizeof(*select)) {
950 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
951 I40E_ERR_PARAM);
952 return;
953 }
954
955 select = msg;
956 if (select->vsi_id != vf->vsi.vsi_num ||
957 select->rx_queues == 0 || select->tx_queues == 0) {
958 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
959 I40E_ERR_PARAM);
960 return;
961 }
962
963 /* Enable TX rings selected by the VF */
964 for (int i = 0; i < 32; i++) {
965 if ((1 << i) & select->tx_queues) {
966 /* Warn if queue is out of VF allocation range */
967 if (i >= vf->vsi.num_queues) {
968 device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
969 vf->vf_num, i);
970 break;
971 }
972 /* Skip this queue if it hasn't been configured */
973 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
974 continue;
975 /* Warn if this queue is already marked as enabled */
976 if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
977 device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n",
978 vf->vf_num, i);
979
980 error = ixl_enable_tx_ring(pf, &vf->qtag, i);
981 if (error)
982 break;
983 else
984 ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
985 }
986 }
987
988 /* Enable RX rings selected by the VF */
989 for (int i = 0; i < 32; i++) {
990 if ((1 << i) & select->rx_queues) {
991 /* Warn if queue is out of VF allocation range */
992 if (i >= vf->vsi.num_queues) {
993 device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
994 vf->vf_num, i);
995 break;
996 }
997 /* Skip this queue if it hasn't been configured */
998 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
999 continue;
1000 /* Warn if this queue is already marked as enabled */
1001 if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
1002 device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n",
1003 vf->vf_num, i);
1004 error = ixl_enable_rx_ring(pf, &vf->qtag, i);
1005 if (error)
1006 break;
1007 else
1008 ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
1009 }
1010 }
1011
1012 if (error) {
1013 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
1014 I40E_ERR_TIMEOUT);
1015 return;
1016 }
1017
1018 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
1019}
1020
1021static void
1022ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1023 void *msg, uint16_t msg_size)
1024{
1025 struct i40e_virtchnl_queue_select *select;
1026 int error = 0;
1027
1028 if (msg_size != sizeof(*select)) {
1029 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1030 I40E_ERR_PARAM);
1031 return;
1032 }
1033
1034 select = msg;
1035 if (select->vsi_id != vf->vsi.vsi_num ||
1036 select->rx_queues == 0 || select->tx_queues == 0) {
1037 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1038 I40E_ERR_PARAM);
1039 return;
1040 }
1041
1042 /* Disable TX rings selected by the VF */
1043 for (int i = 0; i < 32; i++) {
1044 if ((1 << i) & select->tx_queues) {
1045 /* Warn if queue is out of VF allocation range */
1046 if (i >= vf->vsi.num_queues) {
1047 device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
1048 vf->vf_num, i);
1049 break;
1050 }
1051 /* Skip this queue if it hasn't been configured */
1052 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
1053 continue;
1054 /* Warn if this queue is already marked as disabled */
1055 if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
1056 device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n",
1057 vf->vf_num, i);
1058 continue;
1059 }
1060 error = ixl_disable_tx_ring(pf, &vf->qtag, i);
1061 if (error)
1062 break;
1063 else
1064 ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
1065 }
1066 }
1067
1068 	/* Disable RX rings selected by the VF */
1069 for (int i = 0; i < 32; i++) {
1070 if ((1 << i) & select->rx_queues) {
1071 /* Warn if queue is out of VF allocation range */
1072 if (i >= vf->vsi.num_queues) {
1073 device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
1074 vf->vf_num, i);
1075 break;
1076 }
1077 /* Skip this queue if it hasn't been configured */
1078 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
1079 continue;
1080 /* Warn if this queue is already marked as disabled */
1081 if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
1082 device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n",
1083 vf->vf_num, i);
1084 continue;
1085 }
1086 error = ixl_disable_rx_ring(pf, &vf->qtag, i);
1087 if (error)
1088 break;
1089 else
1090 ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
1091 }
1092 }
1093
1094 if (error) {
1095 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1096 I40E_ERR_TIMEOUT);
1097 return;
1098 }
1099
1100 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
1101}
1102
1103static bool
1104ixl_zero_mac(const uint8_t *addr)
1105{
1106 uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
1107
1108 return (cmp_etheraddr(addr, zero));
1109}
1110
1111static bool
1112ixl_bcast_mac(const uint8_t *addr)
1113{
1114
1115 return (cmp_etheraddr(addr, ixl_bcast_addr));
1116}
1117
1118static int
1119ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
1120{
1121
1122 if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
1123 return (EINVAL);
1124
1125 /*
1126 * If the VF is not allowed to change its MAC address, don't let it
1127 * set a MAC filter for an address that is not a multicast address and
1128 * is not its assigned MAC.
1129 */
1130 if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
1131 !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
1132 return (EPERM);
1133
1134 return (0);
1135}
1136
1137static void
1138ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1139 uint16_t msg_size)
1140{
1141 struct i40e_virtchnl_ether_addr_list *addr_list;
1142 struct i40e_virtchnl_ether_addr *addr;
1143 struct ixl_vsi *vsi;
1144 int i;
1145 size_t expected_size;
1146
1147 vsi = &vf->vsi;
1148
1149 if (msg_size < sizeof(*addr_list)) {
1150 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1151 I40E_ERR_PARAM);
1152 return;
1153 }
1154
1155 addr_list = msg;
1156 expected_size = sizeof(*addr_list) +
1157 addr_list->num_elements * sizeof(*addr);
1158
1159 if (addr_list->num_elements == 0 ||
1160 addr_list->vsi_id != vsi->vsi_num ||
1161 msg_size != expected_size) {
1162 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1163 I40E_ERR_PARAM);
1164 return;
1165 }
1166
1167 for (i = 0; i < addr_list->num_elements; i++) {
1168 if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
1169 i40e_send_vf_nack(pf, vf,
1170 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
1171 return;
1172 }
1173 }
1174
1175 for (i = 0; i < addr_list->num_elements; i++) {
1176 addr = &addr_list->list[i];
1177 ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
1178 }
1179
1180 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
1181}
1182
1183static void
1184ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1185 uint16_t msg_size)
1186{
1187 struct i40e_virtchnl_ether_addr_list *addr_list;
1188 struct i40e_virtchnl_ether_addr *addr;
1189 size_t expected_size;
1190 int i;
1191
1192 if (msg_size < sizeof(*addr_list)) {
1193 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
1194 I40E_ERR_PARAM);
1195 return;
1196 }
1197
1198 addr_list = msg;
1199 expected_size = sizeof(*addr_list) +
1200 addr_list->num_elements * sizeof(*addr);
1201
1202 if (addr_list->num_elements == 0 ||
1203 addr_list->vsi_id != vf->vsi.vsi_num ||
1204 msg_size != expected_size) {
1205 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
1206 I40E_ERR_PARAM);
1207 return;
1208 }
1209
1210 for (i = 0; i < addr_list->num_elements; i++) {
1211 addr = &addr_list->list[i];
1212 if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
1213 i40e_send_vf_nack(pf, vf,
1214 			    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
1215 return;
1216 }
1217 }
1218
1219 for (i = 0; i < addr_list->num_elements; i++) {
1220 addr = &addr_list->list[i];
1221 ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
1222 }
1223
1224 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
1225}
1226
1227static enum i40e_status_code
1228ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
1229{
1230 struct i40e_vsi_context vsi_ctx;
1231
1232 vsi_ctx.seid = vf->vsi.seid;
1233
1234 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
1235 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
1236 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1237 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1238 return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
1239}
1240
1241static void
1242ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1243 uint16_t msg_size)
1244{
1245 struct i40e_virtchnl_vlan_filter_list *filter_list;
1246 enum i40e_status_code code;
1247 size_t expected_size;
1248 int i;
1249
1250 if (msg_size < sizeof(*filter_list)) {
1251 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1252 I40E_ERR_PARAM);
1253 return;
1254 }
1255
1256 filter_list = msg;
1257 expected_size = sizeof(*filter_list) +
1258 filter_list->num_elements * sizeof(uint16_t);
1259 if (filter_list->num_elements == 0 ||
1260 filter_list->vsi_id != vf->vsi.vsi_num ||
1261 msg_size != expected_size) {
1262 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1263 I40E_ERR_PARAM);
1264 return;
1265 }
1266
1267 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1268 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1269 I40E_ERR_PARAM);
1270 return;
1271 }
1272
1273 for (i = 0; i < filter_list->num_elements; i++) {
1274 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1275 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1276 I40E_ERR_PARAM);
1277 return;
1278 }
1279 }
1280
1281 	code = ixl_vf_enable_vlan_strip(pf, vf);
1282 	if (code != I40E_SUCCESS) {
1283 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1284 			I40E_ERR_PARAM);
		return;
1285 	}
1286
1287 for (i = 0; i < filter_list->num_elements; i++)
1288 ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1289
1290 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
1291}
1292
1293static void
1294ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1295 uint16_t msg_size)
1296{
1297 struct i40e_virtchnl_vlan_filter_list *filter_list;
1298 int i;
1299 size_t expected_size;
1300
1301 if (msg_size < sizeof(*filter_list)) {
1302 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1303 I40E_ERR_PARAM);
1304 return;
1305 }
1306
1307 filter_list = msg;
1308 expected_size = sizeof(*filter_list) +
1309 filter_list->num_elements * sizeof(uint16_t);
1310 if (filter_list->num_elements == 0 ||
1311 filter_list->vsi_id != vf->vsi.vsi_num ||
1312 msg_size != expected_size) {
1313 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1314 I40E_ERR_PARAM);
1315 return;
1316 }
1317
1318 for (i = 0; i < filter_list->num_elements; i++) {
1319 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1320 			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1321 I40E_ERR_PARAM);
1322 return;
1323 }
1324 }
1325
1326 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1327 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1328 I40E_ERR_PARAM);
1329 return;
1330 }
1331
1332 for (i = 0; i < filter_list->num_elements; i++)
1333 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1334
1335 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
1336}
1337
1338static void
1339ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1340 void *msg, uint16_t msg_size)
1341{
1342 struct i40e_virtchnl_promisc_info *info;
1343 enum i40e_status_code code;
1344
1345 if (msg_size != sizeof(*info)) {
1346 i40e_send_vf_nack(pf, vf,
1347 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1348 return;
1349 }
1350
1351 if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
1352 i40e_send_vf_nack(pf, vf,
1353 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1354 return;
1355 }
1356
1357 info = msg;
1358 if (info->vsi_id != vf->vsi.vsi_num) {
1359 i40e_send_vf_nack(pf, vf,
1360 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1361 return;
1362 }
1363
1364 code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
1365 info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
1366 if (code != I40E_SUCCESS) {
1367 i40e_send_vf_nack(pf, vf,
1368 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
1369 return;
1370 }
1371
1372 code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
1373 info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
1374 if (code != I40E_SUCCESS) {
1375 i40e_send_vf_nack(pf, vf,
1376 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
1377 return;
1378 }
1379
1380 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
1381}
1382
1383static void
1384ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1385 uint16_t msg_size)
1386{
1387 struct i40e_virtchnl_queue_select *queue;
1388
1389 if (msg_size != sizeof(*queue)) {
1390 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
1391 I40E_ERR_PARAM);
1392 return;
1393 }
1394
1395 queue = msg;
1396 if (queue->vsi_id != vf->vsi.vsi_num) {
1397 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
1398 I40E_ERR_PARAM);
1399 return;
1400 }
1401
1402 ixl_update_eth_stats(&vf->vsi);
1403
1404 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
1405 I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
1406}
1407
1408static void
1409ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1410 uint16_t msg_size)
1411{
1412 struct i40e_hw *hw;
1413 struct i40e_virtchnl_rss_key *key;
1414 struct i40e_aqc_get_set_rss_key_data key_data;
1415 enum i40e_status_code status;
1416
1417 hw = &pf->hw;
1418
1419 if (msg_size < sizeof(*key)) {
1420 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1421 I40E_ERR_PARAM);
1422 return;
1423 }
1424
1425 key = msg;
1426
1427 if (key->key_len > 52) {
1428 device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
1429 vf->vf_num, key->key_len, 52);
1430 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1431 I40E_ERR_PARAM);
1432 return;
1433 }
1434
1435 if (key->vsi_id != vf->vsi.vsi_num) {
1436 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1437 vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
1438 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1439 I40E_ERR_PARAM);
1440 return;
1441 }
1442
1443 /* Fill out hash using MAC-dependent method */
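	/*
	 * The X722 programs the key through the admin queue, split into the
	 * 40-byte standard key and the extended key; other MACs write it
	 * directly into the VF's HKEY registers.
	 */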
1444 if (hw->mac.type == I40E_MAC_X722) {
1445 bzero(&key_data, sizeof(key_data));
1446 if (key->key_len <= 40)
1447 bcopy(key->key, key_data.standard_rss_key, key->key_len);
1448 else {
1449 bcopy(key->key, key_data.standard_rss_key, 40);
1450 bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
1451 }
1452 status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
1453 if (status) {
1454 device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
1455 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1456 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1457 I40E_ERR_ADMIN_QUEUE_ERROR);
1458 return;
1459 }
1460 } else {
1461 for (int i = 0; i < (key->key_len / 4); i++)
1462 i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
1463 }
1464
1465 DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
1466 vf->vf_num, key->key[0]);
1467
1468 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY);
1469}
1470
1471static void
1472ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1473 uint16_t msg_size)
1474{
1475 struct i40e_hw *hw;
1476 struct i40e_virtchnl_rss_lut *lut;
1477 enum i40e_status_code status;
1478
1479 hw = &pf->hw;
1480
1481 if (msg_size < sizeof(*lut)) {
1482 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1483 I40E_ERR_PARAM);
1484 return;
1485 }
1486
1487 lut = msg;
1488
1489 if (lut->lut_entries > 64) {
1490 device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
1491 vf->vf_num, lut->lut_entries, 64);
1492 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1493 I40E_ERR_PARAM);
1494 return;
1495 }
1496
1497 if (lut->vsi_id != vf->vsi.vsi_num) {
1498 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1499 vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
1500 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1501 I40E_ERR_PARAM);
1502 return;
1503 }
1504
1505 /* Fill out LUT using MAC-dependent method */
1506 if (hw->mac.type == I40E_MAC_X722) {
1507 status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
1508 if (status) {
1509 device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1510 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1511 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1512 I40E_ERR_ADMIN_QUEUE_ERROR);
1513 return;
1514 }
1515 } else {
1516 for (int i = 0; i < (lut->lut_entries / 4); i++)
1517 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
1518 }
1519
1520 DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
1521 vf->vf_num, lut->lut[0], lut->lut_entries);
1522
1523 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT);
1524}
1525
1526static void
1527ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1528 uint16_t msg_size)
1529{
1530 struct i40e_hw *hw;
1531 struct i40e_virtchnl_rss_hena *hena;
1532
1533 hw = &pf->hw;
1534
1535 if (msg_size < sizeof(*hena)) {
1536 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
1537 I40E_ERR_PARAM);
1538 return;
1539 }
1540
1541 hena = msg;
1542
1543 /* Set HENA */
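	/*
	 * The 64-bit hash-enable mask is split across the two 32-bit
	 * VFQF_HENA registers.
	 */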
1544 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
1545 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
1546
1547 DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
1548 vf->vf_num, hena->hena);
1549
1550 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA);
1551}
1552
1553void
1554ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
1555{
1556 struct ixl_vf *vf;
1557 void *msg;
1558 uint16_t vf_num, msg_size;
1559 uint32_t opcode;
1560
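	/*
	 * The event carries the absolute VF id; convert it to this PF's
	 * zero-based VF index.
	 */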
1561 vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
1562 opcode = le32toh(event->desc.cookie_high);
1563
1564 if (vf_num >= pf->num_vfs) {
1565 device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
1566 return;
1567 }
1568
1569 vf = &pf->vfs[vf_num];
1570 msg = event->msg_buf;
1571 msg_size = event->msg_len;
1572
1573 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
1574 "Got msg %s(%d) from%sVF-%d of size %d\n",
1575 ixl_vc_opcode_str(opcode), opcode,
1576 (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
1577 vf_num, msg_size);
1578
1579 /* This must be a stray msg from a previously destroyed VF. */
1580 if (!(vf->vf_flags & VF_FLAG_ENABLED))
1581 return;
1582
1583 switch (opcode) {
1584 case I40E_VIRTCHNL_OP_VERSION:
1585 ixl_vf_version_msg(pf, vf, msg, msg_size);
1586 break;
1587 case I40E_VIRTCHNL_OP_RESET_VF:
1588 ixl_vf_reset_msg(pf, vf, msg, msg_size);
1589 break;
1590 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
1591 ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
1592 break;
1593 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1594 ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
1595 break;
1596 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1597 ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
1598 break;
1599 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1600 ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
1601 break;
1602 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1603 ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
1604 break;
1605 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
1606 ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
1607 break;
1608 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
1609 ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
1610 break;
1611 case I40E_VIRTCHNL_OP_ADD_VLAN:
1612 ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
1613 break;
1614 case I40E_VIRTCHNL_OP_DEL_VLAN:
1615 ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
1616 break;
1617 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1618 ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
1619 break;
1620 case I40E_VIRTCHNL_OP_GET_STATS:
1621 ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
1622 break;
1623 case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
1624 ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
1625 break;
1626 case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
1627 ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
1628 break;
1629 case I40E_VIRTCHNL_OP_SET_RSS_HENA:
1630 ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
1631 break;
1632
1633 /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
1634 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
1635 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
1636 default:
1637 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
1638 break;
1639 }
1640}
1641
1642/* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
1643void
1644ixl_handle_vflr(void *arg, int pending)
1645{
1646 struct ixl_pf *pf;
1647 struct ixl_vf *vf;
1648 struct i40e_hw *hw;
1649 uint16_t global_vf_num;
1650 uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
1651 int i;
1652
1653 pf = arg;
1654 hw = &pf->hw;
1655
1656 IXL_PF_LOCK(pf);
1657 for (i = 0; i < pf->num_vfs; i++) {
1658 global_vf_num = hw->func_caps.vf_base_id + i;
1659
1660 vf = &pf->vfs[i];
1661 if (!(vf->vf_flags & VF_FLAG_ENABLED))
1662 continue;
1663
1664 vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
1665 vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
1666 vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
1667 if (vflrstat & vflrstat_mask) {
1668 wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
1669 vflrstat_mask);
1670
1671 ixl_reinit_vf(pf, vf);
1672 }
1673 }
1674
1675 icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
1676 icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
1677 wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
1678 ixl_flush(hw);
1679
1680 IXL_PF_UNLOCK(pf);
1681}
1682
1683static int
1684ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
1685{
1686
1687 switch (err) {
1688 case I40E_AQ_RC_EPERM:
1689 return (EPERM);
1690 case I40E_AQ_RC_ENOENT:
1691 return (ENOENT);
1692 case I40E_AQ_RC_ESRCH:
1693 return (ESRCH);
1694 case I40E_AQ_RC_EINTR:
1695 return (EINTR);
1696 case I40E_AQ_RC_EIO:
1697 return (EIO);
1698 case I40E_AQ_RC_ENXIO:
1699 return (ENXIO);
1700 case I40E_AQ_RC_E2BIG:
1701 return (E2BIG);
1702 case I40E_AQ_RC_EAGAIN:
1703 return (EAGAIN);
1704 case I40E_AQ_RC_ENOMEM:
1705 return (ENOMEM);
1706 case I40E_AQ_RC_EACCES:
1707 return (EACCES);
1708 case I40E_AQ_RC_EFAULT:
1709 return (EFAULT);
1710 case I40E_AQ_RC_EBUSY:
1711 return (EBUSY);
1712 case I40E_AQ_RC_EEXIST:
1713 return (EEXIST);
1714 case I40E_AQ_RC_EINVAL:
1715 return (EINVAL);
1716 case I40E_AQ_RC_ENOTTY:
1717 return (ENOTTY);
1718 case I40E_AQ_RC_ENOSPC:
1719 return (ENOSPC);
1720 case I40E_AQ_RC_ENOSYS:
1721 return (ENOSYS);
1722 case I40E_AQ_RC_ERANGE:
1723 return (ERANGE);
1724 case I40E_AQ_RC_EFLUSHED:
1725 return (EINVAL); /* No exact equivalent in errno.h */
1726 case I40E_AQ_RC_BAD_ADDR:
1727 return (EFAULT);
1728 case I40E_AQ_RC_EMODE:
1729 return (EPERM);
1730 case I40E_AQ_RC_EFBIG:
1731 return (EFBIG);
1732 default:
1733 return (EINVAL);
1734 }
1735}
1736
1737int
1738ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
1739{
1740 struct ixl_pf *pf;
1741 struct i40e_hw *hw;
1742 struct ixl_vsi *pf_vsi;
1743 enum i40e_status_code ret;
1744 int i, error;
1745
1746 pf = device_get_softc(dev);
1747 hw = &pf->hw;
1748 pf_vsi = &pf->vsi;
1749
1750 IXL_PF_LOCK(pf);
1751 pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
1752 M_ZERO);
1753
1754 if (pf->vfs == NULL) {
1755 error = ENOMEM;
1756 goto fail;
1757 }
1758
1759 for (i = 0; i < num_vfs; i++)
1760 sysctl_ctx_init(&pf->vfs[i].ctx);
1761
1762 ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
1763 1, FALSE, &pf->veb_seid, FALSE, NULL);
1764 if (ret != I40E_SUCCESS) {
1765 error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
1766		device_printf(dev, "add_veb failed; code=%d error=%d\n", ret,
1767 error);
1768 goto fail;
1769 }
1770
1771 ixl_enable_adminq(hw);
1772
1773 pf->num_vfs = num_vfs;
1774 IXL_PF_UNLOCK(pf);
1775 return (0);
1776
1777fail:
1778 free(pf->vfs, M_IXL);
1779 pf->vfs = NULL;
1780 IXL_PF_UNLOCK(pf);
1781 return (error);
1782}
1783
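/*
 * Tear down all VF state: delete each VF VSI and the VEB, release the
 * queues that were allocated to the VFs, and free the per-VF array.
 */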
1784void
1785ixl_iov_uninit(device_t dev)
1786{
1787 struct ixl_pf *pf;
1788 struct i40e_hw *hw;
1789 struct ixl_vsi *vsi;
1790 struct ifnet *ifp;
1791 struct ixl_vf *vfs;
1792 int i, num_vfs;
1793
1794 pf = device_get_softc(dev);
1795 hw = &pf->hw;
1796 vsi = &pf->vsi;
1797 ifp = vsi->ifp;
1798
1799 IXL_PF_LOCK(pf);
1800 for (i = 0; i < pf->num_vfs; i++) {
1801 if (pf->vfs[i].vsi.seid != 0)
1802 i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
1803 ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
1804 DDPRINTF(dev, "VF %d: %d released\n",
1805 i, pf->vfs[i].qtag.num_allocated);
1806 DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1807 }
1808
1809 if (pf->veb_seid != 0) {
1810 i40e_aq_delete_element(hw, pf->veb_seid, NULL);
1811 pf->veb_seid = 0;
1812 }
1813
1814 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
1815 ixl_disable_intr(vsi);
1816 ixl_flush(hw);
1817 }
1818
1819 vfs = pf->vfs;
1820 num_vfs = pf->num_vfs;
1821
1822 pf->vfs = NULL;
1823 pf->num_vfs = 0;
1824 IXL_PF_UNLOCK(pf);
1825
1826 /* Do this after the unlock as sysctl_ctx_free might sleep. */
1827 for (i = 0; i < num_vfs; i++)
1828 sysctl_ctx_free(&vfs[i].ctx);
1829 free(vfs, M_IXL);
1830}
1831
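/*
 * Reserve queues for a VF from the PF's queue manager, clamping the
 * requested count to the supported range of 1 to 16.
 */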
1832static int
1833ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
1834{
1835 device_t dev = pf->dev;
1836 int error;
1837
1838	/* Validate the requested queue count and clamp it if out of range */
1839 if (num_queues < 1 || num_queues > 16)
1840 device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
1841 num_queues, vf->vf_num);
1842 if (num_queues < 1) {
1843 device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
1844 num_queues = 1;
1845 } else if (num_queues > 16) {
1846 device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num);
1847 num_queues = 16;
1848 }
1849 error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
1850 if (error) {
1851 device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
1852 num_queues, vf->vf_num);
1853 return (ENOSPC);
1854 }
1855
1856 DDPRINTF(dev, "VF %d: %d allocated, %d active",
1857 vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
1858 DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1859
1860 return (0);
1861}
1862
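/*
 * Configure a single VF from the supplied parameter list: reserve its
 * queues, set up its VSI, apply the MAC/VLAN/promiscuous capabilities
 * granted by the administrator, and reset the VF so it starts clean.
 *
 * The parameter names read below ("num-queues", "mac-addr",
 * "allow-set-mac", "mac-anti-spoof", "allow-promisc") correspond to the
 * per-VF settings an administrator would supply, for example through an
 * iovctl(8) configuration; the values here are illustrative only:
 *
 *	VF-0 {
 *		num-queues : 4;
 *		mac-addr : "02:00:00:00:00:01";
 *		allow-set-mac : true;
 *		mac-anti-spoof : true;
 *		allow-promisc : false;
 *	}
 */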
1863int
1864ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
1865{
1866 char sysctl_name[QUEUE_NAME_LEN];
1867 struct ixl_pf *pf;
1868 struct ixl_vf *vf;
1869 const void *mac;
1870 size_t size;
1871 int error;
1872 int vf_num_queues;
1873
1874 pf = device_get_softc(dev);
1875 vf = &pf->vfs[vfnum];
1876
1877 IXL_PF_LOCK(pf);
1878 vf->vf_num = vfnum;
1879
1880 vf->vsi.back = pf;
1881 vf->vf_flags = VF_FLAG_ENABLED;
1882 SLIST_INIT(&vf->vsi.ftl);
1883
1884 /* Reserve queue allocation from PF */
1885 vf_num_queues = nvlist_get_number(params, "num-queues");
1886 error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
1887 if (error != 0)
1888 goto out;
1889
1890 error = ixl_vf_setup_vsi(pf, vf);
1891 if (error != 0)
1892 goto out;
1893
1894 if (nvlist_exists_binary(params, "mac-addr")) {
1895 mac = nvlist_get_binary(params, "mac-addr", &size);
1896 bcopy(mac, vf->mac, ETHER_ADDR_LEN);
1897
1898 if (nvlist_get_bool(params, "allow-set-mac"))
1899 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1900 } else
1901 /*
1902 * If the administrator has not specified a MAC address then
1903 * we must allow the VF to choose one.
1904 */
1905 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1906
1907 if (nvlist_get_bool(params, "mac-anti-spoof"))
1908 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
1909
1910 if (nvlist_get_bool(params, "allow-promisc"))
1911 vf->vf_flags |= VF_FLAG_PROMISC_CAP;
1912
1913 vf->vf_flags |= VF_FLAG_VLAN_CAP;
1914
1915 ixl_reset_vf(pf, vf);
1916out:
1917 IXL_PF_UNLOCK(pf);
1918 if (error == 0) {
1919 snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
1920 ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
1921 }
1922
1923 return (error);
1924}
1925