/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/10/sys/dev/ixgbe/if_sriov.c 315333 2017-03-15 21:20:17Z erj $*/

#include "ixgbe.h"

#ifdef PCI_IOV

MALLOC_DECLARE(M_IXGBE);

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */
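/*
 * For illustration only: the schema above describes the per-VF knobs an
 * administrator sets through iovctl(8).  A minimal iovctl.conf(5) sketch
 * (device name and MAC value are hypothetical) might look like:
 *
 *	PF {
 *		device : "ix0";
 *		num_vfs : 2;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "02:00:00:00:00:01";
 *		allow-set-mac : true;
 *	}
 */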

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
	int i;
	int index;

	for (i = 0; i < adapter->num_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->rx_rings[i].me = index;
		adapter->tx_rings[i].me = index;
	}
}

/* Support functions for SR-IOV/VF management */
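/*
 * A note on the mailbox message format as used by the helpers below: each
 * message is a sequence of 32-bit words written via ixgbe_write_mbx().
 * Word 0 carries the message type in its low bits plus the status flags
 * IXGBE_VT_MSGTYPE_ACK, IXGBE_VT_MSGTYPE_NACK and IXGBE_VT_MSGTYPE_CTS
 * (clear-to-send) in its high bits; IXGBE_VT_MSG_MASK strips the status
 * bits so a request can be echoed back with ACK or NACK set.
 */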
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(&adapter->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}
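/*
 * Worked example of the mapping above: in IXGBE_32_VM mode each pool owns
 * ixgbe_vf_queues() == 4 queues, so pool 5's third queue (num == 2) maps to
 * hardware queue index 5 * 4 + 2 == 22.  The PF's own rings are aligned the
 * same way by ixgbe_align_all_queue_indices() using adapter->pool.
 */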

static inline void
ixgbe_update_max_frame(struct adapter *adapter, int max_frame)
{
	if (adapter->max_frame_size < max_frame)
		adapter->max_frame_size = max_frame;
}

inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */

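/*
 * Background for the register twiddling below, summarized from Intel's
 * 82599/X540 datasheets (a reference note, not derived from this file):
 * VMOLR controls per-pool receive filtering -- AUPE accepts untagged
 * packets, ROMPE/ROPE accept packets matching the multicast/unicast hash
 * tables, BAM accepts broadcasts, and MPE enables multicast promiscuous
 * mode.  VMVIR supplies the default VLAN tag inserted on the pool's
 * transmissions when IXGBE_VMVIR_VLANA_DEFAULT is set.
 */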
static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
                          uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		//vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */

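/*
 * Note for the checks below: ETHER_MAX_LEN is the classic 1518-byte maximum
 * Ethernet frame (1500-byte payload plus header and CRC), so any
 * max_frame_size above it implies jumbo frames are in use.
 */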
static boolean_t
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (adapter->max_frame_size > ETHER_MAX_LEN ||
		    vf->max_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (adapter->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);
	}
} /* ixgbe_vf_frame_size_compatible */


static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&adapter->hw, vf->rar_index);

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(adapter, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */

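/*
 * Sketch of the VF reset handshake implemented below: the VF posts
 * IXGBE_VF_RESET through its mailbox; the PF quiesces the VF's state, then
 * answers with an IXGBE_VF_PERMADDR_MSG_LEN-word reply carrying ACK or NACK
 * plus CTS in word 0, the VF's permanent MAC address in words 1-2, and the
 * multicast filter type in word 3.
 */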
static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t*)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are programmed via the Multicast Table Array (MTA):
 * the 12-bit multicast hash selects one bit out of 4096, arranged as 128
 * 32-bit registers.  The high 7 bits of the hash pick the register and the
 * low 5 bits pick the bit within it.
 */
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
	u16	*list = (u16*)&msg[1];
	int	entries;
	u32	vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */

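/*
 * SET_LPE handling below: msg[1] carries the VF's requested maximum frame
 * size in bytes, including CRC.  The PF records the size, re-evaluates
 * whether the VF may receive (the 82599 compatibility rules above), and
 * raises the global IXGBE_MHADD max-frame setting if needed.  Out-of-range
 * requests are intentionally ACKed but otherwise ignored.
 */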
static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
                     uint32_t *msg)
{
	// XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (msg[0]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(adapter->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */


static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d",
	    adapter->ifp->if_xname, msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
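/*
 * This runs as a taskqueue(9) task; the driver's interrupt code is expected
 * to enqueue it when the mailbox interrupt fires (in this driver that wiring
 * lives in ixgbe.c, outside this file), so all VF housekeeping happens under
 * the core lock rather than directly in the interrupt handler.
 */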
void
ixgbe_handle_mbx(void *context, int pending)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	adapter = context;
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (ixgbe_check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (ixgbe_check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (ixgbe_check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mbx */

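/*
 * Pool accounting below, illustrated: with <= 2 PF queues the device is put
 * in IXGBE_64_VM mode, so adapter->pool = 63 and up to 63 VFs fit in pools
 * 0-62 with the PF taking the last pool; with 3-4 queues, IXGBE_32_VM gives
 * 4 queues per pool and room for 31 VFs.  More than 4 PF queues cannot be
 * combined with SR-IOV on this hardware, hence the EINVAL.
 */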
int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = device_get_softc(dev);
	adapter->iov_mode = IXGBE_NO_VM;
	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	if (adapter->num_queues <= 2)
		adapter->iov_mode = IXGBE_64_VM;
	else if (adapter->num_queues <= 4)
		adapter->iov_mode = IXGBE_32_VM;
	else {
		retval = EINVAL;
		goto err_init_iov;
	}

	/* Reserve 1 VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;

	if (num_vfs > adapter->pool) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(adapter);
		goto err_init_iov;
	}

	ixgbe_init_locked(adapter);
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);

	return retval;

err_init_iov:
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return retval;
} /* ixgbe_init_iov */

void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_uninit_iov */

static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

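/*
 * The read-modify-write sequence below keeps MRQC, MTQC, GCR_EXT and GPIE in
 * agreement about the pool layout; the per-mode values match what
 * ixgbe_get_mrqc()/ixgbe_get_mtqc() above compute.  Per the 82599 datasheet
 * (a reference note, not derived from this file), the VMDq pool count
 * programmed here must be consistent across all four registers for
 * queue-to-pool routing to work.
 */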
void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc    |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie    |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc    |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie    |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VF's */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

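/*
 * ixgbe_add_vf() is the driver's end of the pci_iov(9) VF-creation path:
 * after ixgbe_init_iov() sizes the pools, the SR-IOV infrastructure invokes
 * it once per VF with the nvlist built from the schema defined in
 * ixgbe_define_iov_schemas() above (so "mac-addr" and "allow-set-mac" here
 * are the same keys an administrator sets via iovctl(8)).
 */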
int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;
	/* OR in the active bit so the capability flags set above survive. */
	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixgbe_add_vf */

#else

void
ixgbe_handle_mbx(void *context, int pending)
{
	UNREFERENCED_2PARAMETER(context, pending);
} /* ixgbe_handle_mbx */

#endif