/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/11/sys/dev/ixgbe/if_sriov.c 347419 2019-05-10 00:46:43Z erj $*/

#include "ixgbe.h"

#ifdef PCI_IOV

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
} /* ixgbe_pci_iov_detach */

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
	int i;
	int index;

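	/*
	 * Map each local queue number onto its absolute index within the
	 * PF's pool; e.g. in 32-VF mode (4 queues per pool) the PF owns
	 * pool 31, so local queue 2 becomes index 31 * 4 + 2 = 126.
	 */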
	for (i = 0; i < adapter->num_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->rx_rings[i].me = index;
		adapter->tx_rings[i].me = index;
	}
} /* ixgbe_align_all_queue_indices */

/* Support functions for SR-IOV/VF management */
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
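	/*
	 * Once the reset handshake has completed (IXGBE_VF_CTS is set),
	 * every message to the VF carries the clear-to-send bit so the
	 * VF knows the PF is operational.
	 */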
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

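/*
 * Queues per pool follow the hardware VMDq layout: the 128 hardware
 * queues are split into either 64 pools of 2 queues or 32 pools of
 * 4 queues.
 */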
static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline void
ixgbe_update_max_frame(struct adapter *adapter, int max_frame)
{
	if (adapter->max_frame_size < max_frame)
		adapter->max_frame_size = max_frame;
}

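/*
 * Compute the MRQC (Multiple Receive Queues Command) value that selects
 * VMDq with RSS for the configured pool count; ixgbe_get_mtqc() below
 * is the transmit-side counterpart.
 */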
inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
                          uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		//vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */


static boolean_t
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (adapter->max_frame_size > ETHER_MAX_LEN ||
		    vf->max_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);

	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (adapter->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);

	}
} /* ixgbe_vf_frame_size_compatible */


static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&adapter->hw, vf->rar_index);

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(adapter, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	vf->flags |= IXGBE_VF_CTS;

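	/*
	 * Reply layout: word 0 carries the message type plus the ACK/NACK
	 * and CTS bits, words 1-2 hold the permanent MAC address, and
	 * word 3 tells the VF which multicast filter type the PF uses.
	 */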
	resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t*)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * Each VF multicast address is recorded by setting the appropriate bit
 * in one of 128 32-bit MTA mask registers (4096 bits in all).
 */
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
	u16	*list = (u16*)&msg[1];
	int	entries;
	u32	vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
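	/*
	 * Each 12-bit hash selects one bit in the 128 x 32-bit MTA array:
	 * bits 11:5 pick the register and bits 4:0 pick the bit within it.
	 * For example, hash 0x555 sets bit 0x15 of MTA register 0x2A.
	 */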
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];
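	/*
	 * LPE (large packet enable): msg[1] is the VF's requested maximum
	 * frame size in bytes, including the 4-byte CRC, which is
	 * validated and stripped off below.
	 */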

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
                     uint32_t *msg)
{
	// XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (msg[0]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(adapter->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */


static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d",
	    adapter->ifp->if_xname, msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

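	/* Until the reset handshake completes, NACK everything else. */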
	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	adapter = context;
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
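			/*
			 * The check_for_* mailbox ops return 0 when the
			 * corresponding event is pending for this pool.
			 */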
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mbx */

int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = device_get_softc(dev);
	adapter->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		adapter->iov_mode = IXGBE_64_VM;
	else
		adapter->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;
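	/* That is, the PF owns pool 31 in 32-VF mode, pool 63 in 64-VF mode. */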

	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(adapter);
		goto err_init_iov;
	}

	adapter->num_vfs = num_vfs;
	adapter->init_locked(adapter);
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);

	return retval;

err_init_iov:
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return retval;
} /* ixgbe_init_iov */

void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));

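	/*
	 * The writes above left only the PF's bit set in its VFRE/VFTE
	 * word; clear the other word so no VF pool can send or receive.
	 */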
	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE_SRIOV);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_uninit_iov */

static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc    |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie    |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc    |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie    |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

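	/*
	 * Enable virtualization, turn on replication, and make the PF's
	 * pool the default destination for frames matching no pool filter.
	 */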
	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixgbe_add_vf */

#else

void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_1PARAMETER(context);
} /* ixgbe_handle_mbx */

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	UNREFERENCED_2PARAMETER(mode, vfnum);

	return num;
} /* ixgbe_vf_que_index */

#endif
