/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2021, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_common.h"
#include "ice_sriov.h"

/**
 * ice_aq_send_msg_to_vf
 * @hw: pointer to the hardware structure
 * @vfid: VF ID to send msg
 * @v_opcode: opcodes for VF-PF communication
 * @v_retval: return error code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @cd: pointer to command details
 *
 * Send a message to the VF driver (0x0802) using the mailbox
 * queue. The message is sent asynchronously via ice_sq_send_cmd(),
 * i.e. it does not wait for completion before returning.
 */
enum ice_status
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
		      u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
	struct ice_aqc_pf_vf_msg *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);

	cmd = &desc.params.virt;
	cmd->id = CPU_TO_LE32(vfid);

	desc.cookie_high = CPU_TO_LE32(v_opcode);
	desc.cookie_low = CPU_TO_LE32(v_retval);

	if (msglen)
		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}

/**
 * ice_aq_send_msg_to_pf
 * @hw: pointer to the hardware structure
 * @v_opcode: opcodes for VF-PF communication
 * @v_retval: return error code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @cd: pointer to command details
 *
 * Send message to PF driver using mailbox queue. By default, this
 * message is sent asynchronously, i.e. ice_sq_send_cmd()
 * does not wait for completion before returning.
 */
enum ice_status
ice_aq_send_msg_to_pf(struct ice_hw *hw, enum virtchnl_ops v_opcode,
		      enum ice_status v_retval, u8 *msg, u16 msglen,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_pf);
	desc.cookie_high = CPU_TO_LE32(v_opcode);
	desc.cookie_low = CPU_TO_LE32(v_retval);

	if (msglen)
		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
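
/* Usage note (illustrative only, not part of this driver): both mailbox send
 * helpers above follow the same pattern. A PF-side caller would typically
 * respond to a VF request roughly as sketched below; the opcode, return value
 * and payload here are hypothetical placeholders.
 *
 *	u8 reply[8] = { 0 };	// hypothetical payload
 *
 *	status = ice_aq_send_msg_to_vf(hw, vfid, v_opcode, ICE_SUCCESS,
 *				       reply, sizeof(reply), NULL);
 */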

/**
 * ice_conv_link_speed_to_virtchnl
 * @adv_link_support: determines the format of the returned link speed
 * @link_speed: variable containing the link_speed to be converted
 *
 * Convert link speed supported by HW to link speed supported by virtchnl.
 * If adv_link_support is true, then return link speed in Mbps. Else return
 * link speed as a VIRTCHNL_LINK_SPEED_* cast to a u32. Note that the caller
 * needs to cast back to an enum virtchnl_link_speed in the case where
 * adv_link_support is false, but when adv_link_support is true the caller can
 * expect the speed in Mbps.
 */
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
	u32 speed;

	if (adv_link_support)
		switch (link_speed) {
		case ICE_AQ_LINK_SPEED_10MB:
			speed = ICE_LINK_SPEED_10MBPS;
			break;
		case ICE_AQ_LINK_SPEED_100MB:
			speed = ICE_LINK_SPEED_100MBPS;
			break;
		case ICE_AQ_LINK_SPEED_1000MB:
			speed = ICE_LINK_SPEED_1000MBPS;
			break;
		case ICE_AQ_LINK_SPEED_2500MB:
			speed = ICE_LINK_SPEED_2500MBPS;
			break;
		case ICE_AQ_LINK_SPEED_5GB:
			speed = ICE_LINK_SPEED_5000MBPS;
			break;
		case ICE_AQ_LINK_SPEED_10GB:
			speed = ICE_LINK_SPEED_10000MBPS;
			break;
		case ICE_AQ_LINK_SPEED_20GB:
			speed = ICE_LINK_SPEED_20000MBPS;
			break;
		case ICE_AQ_LINK_SPEED_25GB:
			speed = ICE_LINK_SPEED_25000MBPS;
			break;
		case ICE_AQ_LINK_SPEED_40GB:
			speed = ICE_LINK_SPEED_40000MBPS;
			break;
		case ICE_AQ_LINK_SPEED_50GB:
			speed = ICE_LINK_SPEED_50000MBPS;
			break;
		case ICE_AQ_LINK_SPEED_100GB:
			speed = ICE_LINK_SPEED_100000MBPS;
			break;
		default:
			speed = ICE_LINK_SPEED_UNKNOWN;
			break;
		}
	else
		/* Virtchnl speeds are not defined for every speed supported in
		 * the hardware. To maintain compatibility with older AVF
		 * drivers, newer speed values are resolved to the closest
		 * known virtchnl speed when reporting.
		 */
		switch (link_speed) {
		case ICE_AQ_LINK_SPEED_10MB:
		case ICE_AQ_LINK_SPEED_100MB:
			speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
			break;
		case ICE_AQ_LINK_SPEED_1000MB:
		case ICE_AQ_LINK_SPEED_2500MB:
		case ICE_AQ_LINK_SPEED_5GB:
			speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
			break;
		case ICE_AQ_LINK_SPEED_10GB:
			speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
			break;
		case ICE_AQ_LINK_SPEED_20GB:
			speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
			break;
		case ICE_AQ_LINK_SPEED_25GB:
			speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
			break;
		case ICE_AQ_LINK_SPEED_40GB:
		case ICE_AQ_LINK_SPEED_50GB:
		case ICE_AQ_LINK_SPEED_100GB:
			speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
			break;
		default:
			speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
			break;
		}

	return speed;
}
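
/* Illustrative only: a caller that negotiated the legacy (non-Mbps) virtchnl
 * API would cast the result back to the virtchnl enum, e.g.:
 *
 *	enum virtchnl_link_speed vs;
 *
 *	vs = (enum virtchnl_link_speed)
 *		ice_conv_link_speed_to_virtchnl(false, li->link_speed);
 *
 * whereas with adv_link_support true the returned u32 is already the speed in
 * Mbps. "li->link_speed" here is a hypothetical placeholder for the
 * AQ-reported speed (ICE_AQ_LINK_SPEED_*).
 */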

/* The mailbox overflow detection algorithm helps to check if there
 * is a possibility of a malicious VF transmitting too many MBX messages to the
 * PF.
 * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
 * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
 * The struct ice_mbx_snapshot helps to track and traverse a static window of
 * messages within the mailbox queue while looking for a malicious VF.
 *
 * 2. When the caller starts processing its mailbox queue in response to an
 * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
 * the algorithm can be run for the first time for that interrupt. This can be
 * done via ice_mbx_reset_snapshot().
 *
 * 3. For every message read by the caller from the MBX Queue, the caller must
 * call the detection algorithm's entry function ice_mbx_vf_state_handler().
 * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
 * filled, as it is required to be passed to the algorithm.
 *
 * 4. Every time a message is read from the MBX queue, a VF ID is received and
 * passed to the state handler. The boolean output is_malvf of the state
 * handler ice_mbx_vf_state_handler() serves as an indicator to the caller
 * whether this VF is malicious or not.
 *
 * 5. When a VF is identified to be malicious, the caller can send a message
 * to the system administrator. The caller can invoke ice_mbx_report_malvf()
 * to help determine if a malicious VF is to be reported or not. This function
 * requires the caller to maintain a global bitmap to track all malicious VFs
 * and pass that to ice_mbx_report_malvf() along with the VF ID which was
 * identified to be malicious by ice_mbx_vf_state_handler().
 *
 * 6. The global bitmap maintained by the PF can be cleared completely if the
 * PF is in reset, or the bit corresponding to a VF can be cleared if that VF
 * is in reset. When a VF is shut down and brought back up, the new VF is
 * assumed to be non-malicious; if it is again found to be malicious it will
 * be reported again.
 *
 * 7. The function ice_mbx_reset_snapshot() is called to reset the information
 * in ice_mbx_snapshot for every new mailbox interrupt handled.
 *
 * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
 * when the driver is unloaded.
 *
 * A sketch of this caller flow follows this comment.
 */
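
/* Illustrative caller flow (hypothetical pseudocode, not part of this file):
 * names such as pf->malvfs, bitmap_len and log_malicious_vf() are
 * placeholders; only the ice_mbx_*() calls are real.
 *
 *	// driver init, after ice_get_caps():
 *	ice_mbx_init_snapshot(hw, vf_count);
 *
 *	// mailbox interrupt handler:
 *	while (ice_clean_rq_elem(...) reads a VF message) {
 *		bool is_malvf, report;
 *
 *		// mbx_data fields filled from the receive queue state
 *		ice_mbx_vf_state_handler(hw, &mbx_data, vf_id, &is_malvf);
 *		if (is_malvf &&
 *		    !ice_mbx_report_malvf(hw, pf->malvfs, bitmap_len,
 *					  vf_id, &report) && report)
 *			log_malicious_vf(vf_id);	// hypothetical
 *	}
 *
 *	// on VF reset:
 *	ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, bitmap_len, vf_id);
 *
 *	// on driver unload:
 *	ice_mbx_deinit_snapshot(hw);
 */
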
#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that
 * the max messages check must be ignored in the algorithm
 */
#define ICE_IGNORE_MAX_MSG_CNT	0xFFFF

/**
 * ice_mbx_traverse - Pass through mailbox snapshot
 * @hw: pointer to the HW struct
 * @new_state: new algorithm state
 *
 * Traverse the static mailbox snapshot without checking
 * for malicious VFs.
 */
static void
ice_mbx_traverse(struct ice_hw *hw,
		 enum ice_mbx_snapshot_state *new_state)
{
	struct ice_mbx_snap_buffer_data *snap_buf;
	u32 num_iterations;

	snap_buf = &hw->mbx_snapshot.mbx_buf;

	/* As the mailbox buffer is circular, apply a mask
	 * to the incremented iteration count.
	 */
	num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);

	/* Check either of the below conditions to exit snapshot traversal:
	 * Condition-1: The number of iterations in the mailbox is equal to
	 * the mailbox head, which indicates that we have reached the end
	 * of the static snapshot.
	 * Condition-2: If the maximum messages serviced in the mailbox for a
	 * given interrupt is the highest possible value, then there is no need
	 * to check if the number of messages processed is equal to it. If not,
	 * check if the number of messages processed is greater than or equal
	 * to the maximum number of mailbox entries serviced in the current work item.
	 */
	if (num_iterations == snap_buf->head ||
	    (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
	     ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
		*new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
}

/**
 * ice_mbx_detect_malvf - Detect malicious VF in snapshot
 * @hw: pointer to the HW struct
 * @vf_id: relative virtual function ID
 * @new_state: new algorithm state
 * @is_malvf: boolean output to indicate if VF is malicious
 *
 * This function tracks the number of asynchronous messages
 * sent per VF and marks the VF as malicious if it exceeds
 * the permissible number of messages to send.
 */
static enum ice_status
ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
		     enum ice_mbx_snapshot_state *new_state,
		     bool *is_malvf)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	if (vf_id >= snap->mbx_vf.vfcntr_len)
		return ICE_ERR_OUT_OF_RANGE;

	/* increment the message count in the VF array */
	snap->mbx_vf.vf_cntr[vf_id]++;

	if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
		*is_malvf = true;

	/* continue to iterate through the mailbox snapshot */
	ice_mbx_traverse(hw, new_state);

	return ICE_SUCCESS;
}

/**
 * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
 * @snap: pointer to mailbox snapshot structure in the ice_hw struct
 *
 * Reset the mailbox snapshot structure and clear VF counter array.
 */
static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
{
	u32 vfcntr_len;

	if (!snap || !snap->mbx_vf.vf_cntr)
		return;

	/* Clear VF counters. */
	vfcntr_len = snap->mbx_vf.vfcntr_len;
	if (vfcntr_len)
		ice_memset(snap->mbx_vf.vf_cntr, 0,
			   (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)),
			   ICE_NONDMA_MEM);

	/* Reset mailbox snapshot for a new capture. */
	ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf),
		   ICE_NONDMA_MEM);
	snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
}

/**
 * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
 * @hw: pointer to the HW struct
 * @mbx_data: pointer to structure containing mailbox data
 * @vf_id: relative virtual function (VF) ID
 * @is_malvf: boolean output to indicate if VF is malicious
 *
 * The function serves as an entry point for the malicious VF
 * detection algorithm by handling the different states and state
 * transitions of the algorithm:
 * New snapshot: This state is entered when creating a new static
 * snapshot. The data from any previous mailbox snapshot is
 * cleared and a new capture of the mailbox head and tail is
 * logged. This will be the new static snapshot used to detect
 * asynchronous messages sent by VFs. Depending on whether the
 * number of pending messages in that snapshot exceeds the
 * watermark value, the state machine enters the traverse or
 * detect state.
 * Traverse: If the pending message count is below the watermark, iterate
 * through the snapshot without any action on the VF.
 * Detect: If the pending message count exceeds the watermark, traverse
 * the static snapshot and look for a malicious VF.
 */
enum ice_status
ice_mbx_vf_state_handler(struct ice_hw *hw,
			 struct ice_mbx_data *mbx_data, u16 vf_id,
			 bool *is_malvf)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
	struct ice_mbx_snap_buffer_data *snap_buf;
	struct ice_ctl_q_info *cq = &hw->mailboxq;
	enum ice_mbx_snapshot_state new_state;
	enum ice_status status = ICE_SUCCESS;

	if (!is_malvf || !mbx_data)
		return ICE_ERR_BAD_PTR;

	/* When entering the mailbox state machine assume that the VF
	 * is not malicious until detected.
	 */
	*is_malvf = false;

	/* The maximum number of messages allowed to be processed while
	 * servicing the current interrupt must exceed the defined AVF
	 * message threshold.
	 */
	if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
		return ICE_ERR_INVAL_SIZE;

	/* The watermark value should not be less than the threshold limit
	 * set for the number of asynchronous messages a VF can send to the
	 * mailbox, nor should it be greater than the maximum number of
	 * messages in the mailbox serviced in the current interrupt.
	 */
	if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
	    mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
		return ICE_ERR_PARAM;

	new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
	snap_buf = &snap->mbx_buf;

	switch (snap_buf->state) {
	case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
		/* Clear any previously held data in mailbox snapshot structure. */
		ice_mbx_reset_snapshot(snap);

		/* Collect the pending ARQ count, number of messages processed and
		 * the maximum number of messages allowed to be processed from the
		 * Mailbox for current interrupt.
		 */
		snap_buf->num_pending_arq = mbx_data->num_pending_arq;
		snap_buf->num_msg_proc = mbx_data->num_msg_proc;
		snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;

		/* Capture a new static snapshot of the mailbox by logging the
		 * head and tail of snapshot and set num_iterations to the tail
		 * value to mark the start of the iteration through the snapshot.
		 */
		snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
						  mbx_data->num_pending_arq);
		snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
		snap_buf->num_iterations = snap_buf->tail;

		/* Pending ARQ messages returned by ice_clean_rq_elem
		 * is the difference between the head and tail of the
		 * mailbox queue. Comparing this value against the watermark
		 * helps to check if we potentially have malicious VFs.
		 */
		if (snap_buf->num_pending_arq >=
		    mbx_data->async_watermark_val) {
			new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
			status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
		} else {
			new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
			ice_mbx_traverse(hw, &new_state);
		}
		break;

	case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
		new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
		ice_mbx_traverse(hw, &new_state);
		break;

	case ICE_MAL_VF_DETECT_STATE_DETECT:
		new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
		status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
		break;

	default:
		new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
		status = ICE_ERR_CFG;
	}

	snap_buf->state = new_state;

	return status;
}
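
/* Illustrative only: before each call to ice_mbx_vf_state_handler() the
 * caller fills struct ice_mbx_data from its receive-queue bookkeeping,
 * roughly along the lines of the (hypothetical) values below.
 *
 *	struct ice_mbx_data mbx_data = { 0 };
 *
 *	mbx_data.num_msg_proc = i;		// messages handled so far
 *	mbx_data.num_pending_arq = pending;	// from the mailbox ARQ head
 *	mbx_data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
 *	mbx_data.async_watermark_val = upper_threshold;	// caller-chosen
 */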

/**
 * ice_mbx_report_malvf - Track and note malicious VF
 * @hw: pointer to the HW struct
 * @all_malvfs: all malicious VFs tracked by PF
 * @bitmap_len: length of bitmap in bits
 * @vf_id: relative virtual function ID of the malicious VF
 * @report_malvf: boolean to indicate if malicious VF must be reported
 *
 * This function will update a bitmap that keeps track of the malicious
 * VFs attached to the PF. A malicious VF must be reported only once if
 * discovered between VF resets or loading, so the function checks
 * the input vf_id against the bitmap to verify if the VF has been
 * detected in any previous mailbox iterations.
 */
enum ice_status
ice_mbx_report_malvf(struct ice_hw *hw, ice_bitmap_t *all_malvfs,
		     u16 bitmap_len, u16 vf_id, bool *report_malvf)
{
	if (!all_malvfs || !report_malvf)
		return ICE_ERR_PARAM;

	*report_malvf = false;

	if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
		return ICE_ERR_INVAL_SIZE;

	if (vf_id >= bitmap_len)
		return ICE_ERR_OUT_OF_RANGE;

	/* If the vf_id is not yet in the bitmap, set its bit and report it */
	if (!ice_is_bit_set(all_malvfs, vf_id)) {
		ice_set_bit(vf_id, all_malvfs);
		ice_debug(hw, ICE_DBG_TRACE, "Malicious VF=%d found\n", vf_id);
		*report_malvf = true;
	}

	return ICE_SUCCESS;
}
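
/* Illustrative only: the caller is expected to own the bitmap passed in here.
 * It might be declared as, e.g.,
 *
 *	ice_declare_bitmap(malvfs, ICE_MAX_VF_COUNT);
 *
 * and a VF reported at most once per detection window:
 *
 *	if (!ice_mbx_report_malvf(hw, malvfs, ICE_MAX_VF_COUNT, vf_id, &report)
 *	    && report)
 *		device_printf(dev, "VF %d is malicious\n", vf_id);
 *
 * ICE_MAX_VF_COUNT and "dev" are hypothetical placeholders here.
 */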

/**
 * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
 * @snap: pointer to the mailbox snapshot structure
 * @all_malvfs: all malicious VFs tracked by PF
 * @bitmap_len: length of bitmap in bits
 * @vf_id: relative virtual function ID of the malicious VF
 *
 * In case of a VF reset, this function can be called to clear
 * the bit corresponding to the VF ID in the bitmap tracking all
 * malicious VFs attached to the PF. The function also clears the
 * VF counter array at the index of the VF ID. This is to ensure
 * that the new VF loaded is not considered malicious before going
 * through the overflow detection algorithm.
 */
enum ice_status
ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, ice_bitmap_t *all_malvfs,
		    u16 bitmap_len, u16 vf_id)
{
	if (!snap || !all_malvfs)
		return ICE_ERR_PARAM;

	if (bitmap_len < snap->mbx_vf.vfcntr_len)
		return ICE_ERR_INVAL_SIZE;

	/* Ensure VF ID value is not larger than bitmap or VF counter length */
	if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
		return ICE_ERR_OUT_OF_RANGE;

	/* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
	ice_clear_bit(vf_id, all_malvfs);

	/* Clear the VF counter in the mailbox snapshot structure for that VF ID.
	 * This ensures that if a VF is unloaded and a new one is brought up with
	 * the same VF ID while a snapshot is in the traverse or detect state,
	 * the counter for that VF ID does not increment on top of existing
	 * values in the mailbox overflow detection algorithm.
	 */
	snap->mbx_vf.vf_cntr[vf_id] = 0;

	return ICE_SUCCESS;
}

/**
 * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
 * @hw: pointer to the hardware structure
 * @vf_count: number of VFs allocated on a PF
 *
 * Clear the mailbox snapshot structure and allocate memory
 * for the VF counter array based on the number of VFs allocated
 * on that PF.
 *
 * Assumption: This function will assume ice_get_caps() has already been
 * called to ensure that the vf_count can be compared against the number
 * of VFs supported as defined in the functional capabilities of the device.
 */
enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	/* Ensure that the number of VFs allocated is non-zero and
	 * is not greater than the number of supported VFs defined in
	 * the functional capabilities of the PF.
	 */
	if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
		return ICE_ERR_INVAL_SIZE;

	snap->mbx_vf.vf_cntr =
		(u32 *)ice_calloc(hw, vf_count,
				  sizeof(*snap->mbx_vf.vf_cntr));
	if (!snap->mbx_vf.vf_cntr)
		return ICE_ERR_NO_MEMORY;

	/* Set the VF counter length to the number of VFs allocated
	 * in the given PF's functional capabilities.
	 */
	snap->mbx_vf.vfcntr_len = vf_count;

	/* Clear mbx_buf in the mailbox snapshot structure and set the
	 * mailbox snapshot state to a new capture.
	 */
	ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf), ICE_NONDMA_MEM);
	snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;

	return ICE_SUCCESS;
}

/**
 * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
 * @hw: pointer to the hardware structure
 *
 * Clear the mailbox snapshot structure and free the VF counter array.
 */
void ice_mbx_deinit_snapshot(struct ice_hw *hw)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	/* Free the VF counter array and reset the VF counter length */
	ice_free(hw, snap->mbx_vf.vf_cntr);
	snap->mbx_vf.vfcntr_len = 0;

	/* Clear mbx_buf in the mailbox snapshot structure */
	ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf), ICE_NONDMA_MEM);
}