1/* SPDX-License-Identifier: BSD-3-Clause */
2/*  Copyright (c) 2024, Intel Corporation
3 *  All rights reserved.
4 *
5 *  Redistribution and use in source and binary forms, with or without
6 *  modification, are permitted provided that the following conditions are met:
7 *
8 *   1. Redistributions of source code must retain the above copyright notice,
9 *      this list of conditions and the following disclaimer.
10 *
11 *   2. Redistributions in binary form must reproduce the above copyright
12 *      notice, this list of conditions and the following disclaimer in the
13 *      documentation and/or other materials provided with the distribution.
14 *
15 *   3. Neither the name of the Intel Corporation nor the names of its
16 *      contributors may be used to endorse or promote products derived from
17 *      this software without specific prior written permission.
18 *
19 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 *  POSSIBILITY OF SUCH DAMAGE.
30 */
31
32#include "ice_ddp_common.h"
33#include "ice_type.h"
34#include "ice_common.h"
35#include "ice_sched.h"
36
37/**
38 * ice_aq_download_pkg
39 * @hw: pointer to the hardware structure
40 * @pkg_buf: the package buffer to transfer
41 * @buf_size: the size of the package buffer
42 * @last_buf: last buffer indicator
43 * @error_offset: returns error offset
44 * @error_info: returns error information
45 * @cd: pointer to command details structure or NULL
46 *
47 * Download Package (0x0C40)
48 */
49static enum ice_status
50ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
51		    u16 buf_size, bool last_buf, u32 *error_offset,
52		    u32 *error_info, struct ice_sq_cd *cd)
53{
54	struct ice_aqc_download_pkg *cmd;
55	struct ice_aq_desc desc;
56	enum ice_status status;
57
58	if (error_offset)
59		*error_offset = 0;
60	if (error_info)
61		*error_info = 0;
62
63	cmd = &desc.params.download_pkg;
64	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
65	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
66
67	if (last_buf)
68		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
69
70	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
71	if (status == ICE_ERR_AQ_ERROR) {
72		/* Read error from buffer only when the FW returned an error */
73		struct ice_aqc_download_pkg_resp *resp;
74
75		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
76		if (error_offset)
77			*error_offset = LE32_TO_CPU(resp->error_offset);
78		if (error_info)
79			*error_info = LE32_TO_CPU(resp->error_info);
80	}
81
82	return status;
83}
84
85/**
86 * ice_aq_upload_section
87 * @hw: pointer to the hardware structure
88 * @pkg_buf: the package buffer which will receive the section
89 * @buf_size: the size of the package buffer
90 * @cd: pointer to command details structure or NULL
91 *
92 * Upload Section (0x0C41)
93 */
94enum ice_status
95ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
96		      u16 buf_size, struct ice_sq_cd *cd)
97{
98	struct ice_aq_desc desc;
99
100	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
101	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
102
103	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
104}
105
106/**
107 * ice_aq_update_pkg
108 * @hw: pointer to the hardware structure
109 * @pkg_buf: the package cmd buffer
110 * @buf_size: the size of the package cmd buffer
111 * @last_buf: last buffer indicator
112 * @error_offset: returns error offset
113 * @error_info: returns error information
114 * @cd: pointer to command details structure or NULL
115 *
116 * Update Package (0x0C42)
117 */
118static enum ice_status
119ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
120		  bool last_buf, u32 *error_offset, u32 *error_info,
121		  struct ice_sq_cd *cd)
122{
123	struct ice_aqc_download_pkg *cmd;
124	struct ice_aq_desc desc;
125	enum ice_status status;
126
127	if (error_offset)
128		*error_offset = 0;
129	if (error_info)
130		*error_info = 0;
131
132	cmd = &desc.params.download_pkg;
133	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
134	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
135
136	if (last_buf)
137		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
138
139	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
140	if (status == ICE_ERR_AQ_ERROR) {
141		/* Read error from buffer only when the FW returned an error */
142		struct ice_aqc_download_pkg_resp *resp;
143
144		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
145		if (error_offset)
146			*error_offset = LE32_TO_CPU(resp->error_offset);
147		if (error_info)
148			*error_info = LE32_TO_CPU(resp->error_info);
149	}
150
151	return status;
152}
153
154/**
155 * ice_find_seg_in_pkg
156 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
158 * @pkg_hdr: pointer to the package header to be searched
159 *
160 * This function searches a package file for a particular segment type. On
161 * success it returns a pointer to the segment header, otherwise it will
162 * return NULL.
163 */
164struct ice_generic_seg_hdr *
165ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
166		    struct ice_pkg_hdr *pkg_hdr)
167{
168	u32 i;
169
170	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
171		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
172		  pkg_hdr->pkg_format_ver.update,
173		  pkg_hdr->pkg_format_ver.draft);
174
175	/* Search all package segments for the requested segment type */
176	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
177		struct ice_generic_seg_hdr *seg;
178
179		seg = (struct ice_generic_seg_hdr *)
180			((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
181
182		if (LE32_TO_CPU(seg->seg_type) == seg_type)
183			return seg;
184	}
185
186	return NULL;
187}
188
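/* Usage sketch (illustrative only; this mirrors how callers later in this
 * file locate the ICE configuration segment, with error handling trimmed):
 *
 *	struct ice_seg *seg;
 *
 *	seg = (struct ice_seg *)
 *		ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
 *	if (!seg)
 *		return ICE_DDP_PKG_INVALID_FILE;
 */
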
189/**
190 * ice_get_pkg_seg_by_idx
191 * @pkg_hdr: pointer to the package header to be searched
192 * @idx: index of segment
193 */
194static struct ice_generic_seg_hdr *
195ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
196{
197	struct ice_generic_seg_hdr *seg = NULL;
198
199	if (idx < LE32_TO_CPU(pkg_hdr->seg_count))
200		seg = (struct ice_generic_seg_hdr *)
201			((u8 *)pkg_hdr +
202			 LE32_TO_CPU(pkg_hdr->seg_offset[idx]));
203
204	return seg;
205}
206
207/**
208 * ice_is_signing_seg_at_idx - determine if segment is a signing segment
209 * @pkg_hdr: pointer to package header
210 * @idx: segment index
211 */
212static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
213{
214	struct ice_generic_seg_hdr *seg;
215	bool retval = false;
216
217	seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx);
218	if (seg)
219		retval = LE32_TO_CPU(seg->seg_type) == SEGMENT_TYPE_SIGNING;
220
221	return retval;
222}
223
224/**
225 * ice_is_signing_seg_type_at_idx
226 * @pkg_hdr: pointer to package header
227 * @idx: segment index
228 * @seg_id: segment id that is expected
229 * @sign_type: signing type
230 *
231 * Determine if a segment is a signing segment of the correct type
232 */
233static bool
234ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
235			       u32 seg_id, u32 sign_type)
236{
237	bool result = false;
238
239	if (ice_is_signing_seg_at_idx(pkg_hdr, idx)) {
240		struct ice_sign_seg *seg;
241
242		seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr,
243								    idx);
244		if (seg && LE32_TO_CPU(seg->seg_id) == seg_id &&
245		    LE32_TO_CPU(seg->sign_type) == sign_type)
246			result = true;
247	}
248
249	return result;
250}
251
252/**
253 * ice_update_pkg_no_lock
254 * @hw: pointer to the hardware structure
255 * @bufs: pointer to an array of buffers
256 * @count: the number of buffers in the array
257 */
258enum ice_status
259ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
260{
261	enum ice_status status = ICE_SUCCESS;
262	u32 i;
263
264	for (i = 0; i < count; i++) {
265		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
266		bool last = ((i + 1) == count);
267		u32 offset, info;
268
269		status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
270					   last, &offset, &info, NULL);
271
272		if (status) {
273			ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
274				  status, offset, info);
275			break;
276		}
277	}
278
279	return status;
280}
281
282/**
283 * ice_update_pkg
284 * @hw: pointer to the hardware structure
285 * @bufs: pointer to an array of buffers
286 * @count: the number of buffers in the array
287 *
288 * Obtains change lock and updates package.
289 */
290enum ice_status
291ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
292{
293	enum ice_status status;
294
295	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
296	if (status)
297		return status;
298
299	status = ice_update_pkg_no_lock(hw, bufs, count);
300
301	ice_release_change_lock(hw);
302
303	return status;
304}
305
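/* Usage sketch (illustrative, not a definitive caller): build a one-section
 * update buffer with the helpers defined later in this file, send it, and
 * free it. The section type, size, and payload (sect_type, sect_size) are
 * assumed to be supplied by the caller and are not values from this file.
 *
 *	struct ice_buf_build *bld;
 *	enum ice_status status;
 *	void *sect;
 *
 *	bld = ice_pkg_buf_alloc_single_section(hw, sect_type, sect_size,
 *					       &sect);
 *	if (!bld)
 *		return ICE_ERR_NO_MEMORY;
 *
 *	... fill the memory at sect with the section contents ...
 *
 *	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
 *	ice_pkg_buf_free(hw, bld);
 */
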
306static enum ice_ddp_state
307ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
308{
309	switch (aq_err) {
310	case ICE_AQ_RC_ENOSEC:
311		return ICE_DDP_PKG_NO_SEC_MANIFEST;
312	case ICE_AQ_RC_EBADSIG:
313		return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
314	case ICE_AQ_RC_ESVN:
315		return ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW;
316	case ICE_AQ_RC_EBADMAN:
317		return ICE_DDP_PKG_MANIFEST_INVALID;
318	case ICE_AQ_RC_EBADBUF:
319		return ICE_DDP_PKG_BUFFER_INVALID;
320	default:
321		return ICE_DDP_PKG_ERR;
322	}
323}
324
325/**
326 * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
327 * @buf: pointer to buffer header
328 */
329static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
330{
331	bool metadata = false;
332
333	if (LE32_TO_CPU(buf->section_entry[0].type) & ICE_METADATA_BUF)
334		metadata = true;
335
336	return metadata;
337}
338
339/**
340 * ice_is_last_download_buffer
341 * @buf: pointer to current buffer header
342 * @idx: index of the buffer in the current sequence
343 * @count: the buffer count in the current sequence
344 *
 * Note: a buffer is treated as the last download buffer if it is the final
 * buffer in the sequence or if the buffer that follows it is a metadata
 * buffer.
346 */
347static bool
348ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count)
349{
350	bool last = ((idx + 1) == count);
351
352	/* A set metadata flag in the next buffer will signal that the current
353	 * buffer will be the last buffer downloaded
354	 */
355	if (!last) {
356		struct ice_buf *next_buf = ((struct ice_buf *)buf) + 1;
357
358		last = ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf);
359	}
360
361	return last;
362}
363
364/**
365 * ice_dwnld_cfg_bufs_no_lock
366 * @hw: pointer to the hardware structure
367 * @bufs: pointer to an array of buffers
368 * @start: buffer index of first buffer to download
369 * @count: the number of buffers to download
370 * @indicate_last: if true, then set last buffer flag on last buffer download
371 *
372 * Downloads package configuration buffers to the firmware. Metadata buffers
373 * are skipped, and the first metadata buffer found indicates that the rest
374 * of the buffers are all metadata buffers.
375 */
376static enum ice_ddp_state
377ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
378			   u32 count, bool indicate_last)
379{
380	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
381	struct ice_buf_hdr *bh;
382	enum ice_aq_err err;
383	u32 offset, info, i;
384
385	if (!bufs || !count)
386		return ICE_DDP_PKG_ERR;
387
388	/* If the first buffer's first section has its metadata bit set
389	 * then there are no buffers to be downloaded, and the operation is
390	 * considered a success.
391	 */
392	bh = (struct ice_buf_hdr *)(bufs + start);
393	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
394		return ICE_DDP_PKG_SUCCESS;
395
396	for (i = 0; i < count; i++) {
397		enum ice_status status;
398		bool last = false;
399
400		bh = (struct ice_buf_hdr *)(bufs + start + i);
401
402		if (indicate_last)
403			last = ice_is_last_download_buffer(bh, i, count);
404
405		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
406					     &offset, &info, NULL);
407
408		/* Save AQ status from download package */
409		if (status) {
410			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
411				  status, offset, info);
412			err = hw->adminq.sq_last_status;
413			state = ice_map_aq_err_to_ddp_state(err);
414			break;
415		}
416
417		if (last)
418			break;
419	}
420
421	return state;
422}
423
424/**
425 * ice_aq_get_pkg_info_list
426 * @hw: pointer to the hardware structure
427 * @pkg_info: the buffer which will receive the information list
428 * @buf_size: the size of the pkg_info information buffer
429 * @cd: pointer to command details structure or NULL
430 *
431 * Get Package Info List (0x0C43)
432 */
433static enum ice_status
434ice_aq_get_pkg_info_list(struct ice_hw *hw,
435			 struct ice_aqc_get_pkg_info_resp *pkg_info,
436			 u16 buf_size, struct ice_sq_cd *cd)
437{
438	struct ice_aq_desc desc;
439
440	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
441
442	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
443}
444
445/**
446 * ice_has_signing_seg - determine if package has a signing segment
447 * @hw: pointer to the hardware structure
448 * @pkg_hdr: pointer to the driver's package hdr
449 */
450static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
451{
452	struct ice_generic_seg_hdr *seg_hdr;
453
454	seg_hdr = (struct ice_generic_seg_hdr *)
455		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr);
456
457	return seg_hdr ? true : false;
458}
459
460/**
461 * ice_get_pkg_segment_id - get correct package segment id, based on device
462 * @mac_type: MAC type of the device
463 */
464static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
465{
466	u32 seg_id;
467
468	switch (mac_type) {
469	case ICE_MAC_GENERIC:
470	case ICE_MAC_GENERIC_3K:
471	case ICE_MAC_GENERIC_3K_E825:
472	default:
473		seg_id = SEGMENT_TYPE_ICE_E810;
474		break;
475	}
476
477	return seg_id;
478}
479
480/**
481 * ice_get_pkg_sign_type - get package segment sign type, based on device
482 * @mac_type: MAC type of the device
483 */
484static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
485{
486	u32 sign_type;
487
488	switch (mac_type) {
489	case ICE_MAC_GENERIC_3K:
490		sign_type = SEGMENT_SIGN_TYPE_RSA3K;
491		break;
492	case ICE_MAC_GENERIC_3K_E825:
493		sign_type = SEGMENT_SIGN_TYPE_RSA3K_E825;
494		break;
495	case ICE_MAC_GENERIC:
496	default:
497		sign_type = SEGMENT_SIGN_TYPE_RSA2K;
498		break;
499	}
500
501	return sign_type;
502}
503
504/**
505 * ice_get_signing_req - get correct package requirements, based on device
506 * @hw: pointer to the hardware structure
507 */
508static void ice_get_signing_req(struct ice_hw *hw)
509{
510	hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type);
511	hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type);
512}
513
514/**
515 * ice_download_pkg_sig_seg - download a signature segment
516 * @hw: pointer to the hardware structure
517 * @seg: pointer to signature segment
518 */
519static enum ice_ddp_state
520ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
521{
522	enum ice_ddp_state state;
523
524	state = ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0,
525					   LE32_TO_CPU(seg->buf_tbl.buf_count),
526					   false);
527
528	return state;
529}
530
531/**
532 * ice_download_pkg_config_seg - download a config segment
533 * @hw: pointer to the hardware structure
534 * @pkg_hdr: pointer to package header
535 * @idx: segment index
536 * @start: starting buffer
537 * @count: buffer count
538 *
 * Note: idx must reference an ICE segment
540 */
541static enum ice_ddp_state
542ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
543			    u32 idx, u32 start, u32 count)
544{
545	struct ice_buf_table *bufs;
546	enum ice_ddp_state state;
547	struct ice_seg *seg;
548	u32 buf_count;
549
550	seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
551	if (!seg)
552		return ICE_DDP_PKG_ERR;
553
554	bufs = ice_find_buf_table(seg);
555	buf_count = LE32_TO_CPU(bufs->buf_count);
556
557	if (start >= buf_count || start + count > buf_count)
558		return ICE_DDP_PKG_ERR;
559
560	state = ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
561					   true);
562
563	return state;
564}
565
566/**
567 * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
568 * @hw: pointer to the hardware structure
569 * @pkg_hdr: pointer to package header
570 * @idx: segment index (must be a signature segment)
571 *
572 * Note: idx must reference a signature segment
573 */
574static enum ice_ddp_state
575ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
576			    u32 idx)
577{
578	enum ice_ddp_state state;
579	struct ice_sign_seg *seg;
580	u32 conf_idx;
581	u32 start;
582	u32 count;
583
584	seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
585	if (!seg) {
586		state = ICE_DDP_PKG_ERR;
587		goto exit;
588	}
589
590	conf_idx = LE32_TO_CPU(seg->signed_seg_idx);
591	start = LE32_TO_CPU(seg->signed_buf_start);
592	count = LE32_TO_CPU(seg->signed_buf_count);
593
594	state = ice_download_pkg_sig_seg(hw, seg);
595	if (state)
596		goto exit;
597
598	state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
599					    count);
600
601exit:
602	return state;
603}
604
605/**
606 * ice_match_signing_seg - determine if a matching signing segment exists
607 * @pkg_hdr: pointer to package header
608 * @seg_id: segment id that is expected
609 * @sign_type: signing type
610 */
611static bool
612ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type)
613{
614	bool match = false;
615	u32 i;
616
617	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
618		if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id,
619						   sign_type)) {
620			match = true;
621			break;
622		}
623	}
624
625	return match;
626}
627
628/**
629 * ice_post_dwnld_pkg_actions - perform post download package actions
630 * @hw: pointer to the hardware structure
631 */
632static enum ice_ddp_state
633ice_post_dwnld_pkg_actions(struct ice_hw *hw)
634{
635	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
636	enum ice_status status;
637
638	status = ice_set_vlan_mode(hw);
639	if (status) {
640		ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
641			  status);
642		state = ICE_DDP_PKG_ERR;
643	}
644
645	return state;
646}
647
648/**
649 * ice_download_pkg_with_sig_seg - download package using signature segments
650 * @hw: pointer to the hardware structure
651 * @pkg_hdr: pointer to package header
652 */
653static enum ice_ddp_state
654ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
655{
	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
	enum ice_aq_err aq_err;
658	enum ice_status status;
659	u32 i;
660
661	ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id);
662	ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type);
663
664	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
665	if (status) {
		if (status == ICE_ERR_AQ_NO_WORK) {
			state = ICE_DDP_PKG_ALREADY_LOADED;
		} else {
			/* Map the AQ error reported for the failed request */
			aq_err = hw->adminq.sq_last_status;
			state = ice_map_aq_err_to_ddp_state(aq_err);
		}
670		return state;
671	}
672
673	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
674		if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id,
675						    hw->pkg_sign_type))
676			continue;
677
678		state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i);
679		if (state)
680			break;
681	}
682
683	if (!state)
684		state = ice_post_dwnld_pkg_actions(hw);
685
686	ice_release_global_cfg_lock(hw);
687
688	return state;
689}
690
691/**
692 * ice_dwnld_cfg_bufs
693 * @hw: pointer to the hardware structure
694 * @bufs: pointer to an array of buffers
695 * @count: the number of buffers in the array
696 *
697 * Obtains global config lock and downloads the package configuration buffers
698 * to the firmware.
699 */
700static enum ice_ddp_state
701ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
702{
703	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
704	enum ice_status status;
705	struct ice_buf_hdr *bh;
706
707	if (!bufs || !count)
708		return ICE_DDP_PKG_ERR;
709
710	/* If the first buffer's first section has its metadata bit set
711	 * then there are no buffers to be downloaded, and the operation is
712	 * considered a success.
713	 */
714	bh = (struct ice_buf_hdr *)bufs;
715	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
716		return ICE_DDP_PKG_SUCCESS;
717
718	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
719	if (status) {
720		if (status == ICE_ERR_AQ_NO_WORK)
721			return ICE_DDP_PKG_ALREADY_LOADED;
722		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
723	}
724
725	state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
726	if (!state)
727		state = ice_post_dwnld_pkg_actions(hw);
728
729	ice_release_global_cfg_lock(hw);
730
731	return state;
732}
733
734/**
735 * ice_download_pkg_without_sig_seg
736 * @hw: pointer to the hardware structure
737 * @ice_seg: pointer to the segment of the package to be downloaded
738 *
739 * Handles the download of a complete package without signature segment.
740 */
741static enum ice_ddp_state
742ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg)
743{
744	struct ice_buf_table *ice_buf_tbl;
745	enum ice_ddp_state state;
746
747	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
748		  ice_seg->hdr.seg_format_ver.major,
749		  ice_seg->hdr.seg_format_ver.minor,
750		  ice_seg->hdr.seg_format_ver.update,
751		  ice_seg->hdr.seg_format_ver.draft);
752
753	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
754		  LE32_TO_CPU(ice_seg->hdr.seg_type),
755		  LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
756
757	ice_buf_tbl = ice_find_buf_table(ice_seg);
758
759	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
760		  LE32_TO_CPU(ice_buf_tbl->buf_count));
761
762	state = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
763				   LE32_TO_CPU(ice_buf_tbl->buf_count));
764
765	return state;
766}
767
768/**
769 * ice_download_pkg
770 * @hw: pointer to the hardware structure
771 * @pkg_hdr: pointer to package header
772 * @ice_seg: pointer to the segment of the package to be downloaded
773 *
774 * Handles the download of a complete package.
775 */
776static enum ice_ddp_state
777ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
778		 struct ice_seg *ice_seg)
779{
780	enum ice_ddp_state state;
781
782	if (hw->pkg_has_signing_seg)
783		state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
784	else
785		state = ice_download_pkg_without_sig_seg(hw, ice_seg);
786
787	ice_post_pkg_dwnld_vlan_mode_cfg(hw);
788
789	return state;
790}
791
792/**
793 * ice_init_pkg_info
794 * @hw: pointer to the hardware structure
795 * @pkg_hdr: pointer to the driver's package hdr
796 *
797 * Saves off the package details into the HW structure.
798 */
799static enum ice_ddp_state
800ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
801{
802	struct ice_generic_seg_hdr *seg_hdr;
803
804	if (!pkg_hdr)
805		return ICE_DDP_PKG_ERR;
806
807	hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr);
808	ice_get_signing_req(hw);
809
810	ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
811		  hw->pkg_seg_id);
812
813	seg_hdr = (struct ice_generic_seg_hdr *)
814		ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
815	if (seg_hdr) {
816		struct ice_meta_sect *meta;
817		struct ice_pkg_enum state;
818
819		ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
820
821		/* Get package information from the Metadata Section */
822		meta = (struct ice_meta_sect *)
823			ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
824					     ICE_SID_METADATA);
825		if (!meta) {
826			ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
827			return ICE_DDP_PKG_INVALID_FILE;
828		}
829
830		hw->pkg_ver = meta->ver;
831		ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
832			   ICE_NONDMA_TO_NONDMA);
833
834		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
835			  meta->ver.major, meta->ver.minor, meta->ver.update,
836			  meta->ver.draft, meta->name);
837
838		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
839		ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
840			   sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);
841
842		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
843			  seg_hdr->seg_format_ver.major,
844			  seg_hdr->seg_format_ver.minor,
845			  seg_hdr->seg_format_ver.update,
846			  seg_hdr->seg_format_ver.draft,
847			  seg_hdr->seg_id);
848	} else {
849		ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
850		return ICE_DDP_PKG_INVALID_FILE;
851	}
852
853	return ICE_DDP_PKG_SUCCESS;
854}
855
856/**
857 * ice_get_pkg_info
858 * @hw: pointer to the hardware structure
859 *
860 * Store details of the package currently loaded in HW into the HW structure.
861 */
862enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
863{
864	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
865	struct ice_aqc_get_pkg_info_resp *pkg_info;
866	u16 size;
867	u32 i;
868
869	size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
870	pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
871	if (!pkg_info)
872		return ICE_DDP_PKG_ERR;
873
874	if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
875		state = ICE_DDP_PKG_ERR;
876		goto init_pkg_free_alloc;
877	}
878
879	for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
880#define ICE_PKG_FLAG_COUNT	4
881		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
882		u8 place = 0;
883
884		if (pkg_info->pkg_info[i].is_active) {
885			flags[place++] = 'A';
886			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
887			hw->active_track_id =
888				LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
889			ice_memcpy(hw->active_pkg_name,
890				   pkg_info->pkg_info[i].name,
891				   sizeof(pkg_info->pkg_info[i].name),
892				   ICE_NONDMA_TO_NONDMA);
893			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
894		}
895		if (pkg_info->pkg_info[i].is_active_at_boot)
896			flags[place++] = 'B';
897		if (pkg_info->pkg_info[i].is_modified)
898			flags[place++] = 'M';
899		if (pkg_info->pkg_info[i].is_in_nvm)
900			flags[place++] = 'N';
901
902		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
903			  i, pkg_info->pkg_info[i].ver.major,
904			  pkg_info->pkg_info[i].ver.minor,
905			  pkg_info->pkg_info[i].ver.update,
906			  pkg_info->pkg_info[i].ver.draft,
907			  pkg_info->pkg_info[i].name, flags);
908	}
909
910init_pkg_free_alloc:
911	ice_free(hw, pkg_info);
912
913	return state;
914}
915
916/**
917 * ice_label_enum_handler
918 * @sect_type: section type
919 * @section: pointer to section
920 * @index: index of the label entry to be returned
921 * @offset: pointer to receive absolute offset, always zero for label sections
922 *
923 * This is a callback function that can be passed to ice_pkg_enum_entry.
924 * Handles enumeration of individual label entries.
925 */
926static void *
927ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
928		       u32 *offset)
929{
930	struct ice_label_section *labels;
931
932	if (!section)
933		return NULL;
934
935	if (index > ICE_MAX_LABELS_IN_BUF)
936		return NULL;
937
938	if (offset)
939		*offset = 0;
940
941	labels = (struct ice_label_section *)section;
942	if (index >= LE16_TO_CPU(labels->count))
943		return NULL;
944
945	return labels->label + index;
946}
947
948/**
949 * ice_enum_labels
950 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
951 * @type: the section type that will contain the label (0 on subsequent calls)
952 * @state: ice_pkg_enum structure that will hold the state of the enumeration
953 * @value: pointer to a value that will return the label's value if found
954 *
955 * Enumerates a list of labels in the package. The caller will call
956 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
 * the end of the list has been reached.
959 */
960static char *
961ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
962		u16 *value)
963{
964	struct ice_label *label;
965
966	/* Check for valid label section on first call */
967	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
968		return NULL;
969
970	label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
971						       NULL,
972						       ice_label_enum_handler);
973	if (!label)
974		return NULL;
975
976	*value = LE16_TO_CPU(label->value);
977	return label->name;
978}
979
980/**
981 * ice_find_label_value
982 * @ice_seg: pointer to the ice segment (non-NULL)
983 * @name: name of the label to search for
984 * @type: the section type that will contain the label
985 * @value: pointer to a value that will return the label's value if found
986 *
987 * Finds a label's value given the label name and the section type to search.
988 * The ice_seg parameter must not be NULL since the first call to
989 * ice_enum_labels requires a pointer to an actual ice_seg structure.
990 */
991enum ice_status
992ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
993		     u16 *value)
994{
995	struct ice_pkg_enum state;
996	char *label_name;
997	u16 val;
998
999	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1000
1001	if (!ice_seg)
1002		return ICE_ERR_PARAM;
1003
1004	do {
1005		label_name = ice_enum_labels(ice_seg, type, &state, &val);
1006		if (label_name && !strcmp(label_name, name)) {
1007			*value = val;
1008			return ICE_SUCCESS;
1009		}
1010
1011		ice_seg = NULL;
1012	} while (label_name);
1013
1014	return ICE_ERR_CFG;
1015}
1016
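/* Example (a sketch; the label name and label_sect_id are placeholders, with
 * label_sect_id standing for one of the ICE_SID_LBL_* section IDs):
 *
 *	u16 val;
 *
 *	if (!ice_find_label_value(hw->seg, "EXAMPLE_LABEL", label_sect_id,
 *				  &val))
 *		... val now holds the label's value ...
 */
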
1017/**
1018 * ice_verify_pkg - verify package
1019 * @pkg: pointer to the package buffer
1020 * @len: size of the package buffer
1021 *
1022 * Verifies various attributes of the package file, including length, format
1023 * version, and the requirement of at least one segment.
1024 */
1025enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
1026{
1027	u32 seg_count;
1028	u32 i;
1029
1030	if (len < ice_struct_size(pkg, seg_offset, 1))
1031		return ICE_DDP_PKG_INVALID_FILE;
1032
1033	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
1034	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
1035	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
1036	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
1037		return ICE_DDP_PKG_INVALID_FILE;
1038
1039	/* pkg must have at least one segment */
1040	seg_count = LE32_TO_CPU(pkg->seg_count);
1041	if (seg_count < 1)
1042		return ICE_DDP_PKG_INVALID_FILE;
1043
1044	/* make sure segment array fits in package length */
1045	if (len < ice_struct_size(pkg, seg_offset, seg_count))
1046		return ICE_DDP_PKG_INVALID_FILE;
1047
1048	/* all segments must fit within length */
1049	for (i = 0; i < seg_count; i++) {
1050		u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
1051		struct ice_generic_seg_hdr *seg;
1052
1053		/* segment header must fit */
1054		if (len < off + sizeof(*seg))
1055			return ICE_DDP_PKG_INVALID_FILE;
1056
1057		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
1058
1059		/* segment body must fit */
1060		if (len < off + LE32_TO_CPU(seg->seg_size))
1061			return ICE_DDP_PKG_INVALID_FILE;
1062	}
1063
1064	return ICE_DDP_PKG_SUCCESS;
1065}
1066
1067/**
1068 * ice_free_seg - free package segment pointer
1069 * @hw: pointer to the hardware structure
1070 *
 * Frees the package segment pointer in the proper manner, depending on whether
 * the segment was allocated or just the passed-in pointer was stored.
1073 */
1074void ice_free_seg(struct ice_hw *hw)
1075{
1076	if (hw->pkg_copy) {
1077		ice_free(hw, hw->pkg_copy);
1078		hw->pkg_copy = NULL;
1079		hw->pkg_size = 0;
1080	}
1081	hw->seg = NULL;
1082}
1083
1084/**
1085 * ice_chk_pkg_version - check package version for compatibility with driver
1086 * @pkg_ver: pointer to a version structure to check
1087 *
1088 * Check to make sure that the package about to be downloaded is compatible with
1089 * the driver. To be compatible, the major and minor components of the package
1090 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
1091 * definitions.
1092 */
1093static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
1094{
1095	if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
1096	    (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
1097	     pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
1098		return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
1099	else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
1100		 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
1101		  pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
1102		return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
1103
1104	return ICE_DDP_PKG_SUCCESS;
1105}
1106
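/* For example, if the driver supports package version 1.3 (the real values
 * come from ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR), a 1.4.x.x package
 * maps to ICE_DDP_PKG_FILE_VERSION_TOO_HIGH, a 1.2.x.x package maps to
 * ICE_DDP_PKG_FILE_VERSION_TOO_LOW, and any 1.3.x.x package is accepted.
 */
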
1107/**
1108 * ice_chk_pkg_compat
1109 * @hw: pointer to the hardware structure
1110 * @ospkg: pointer to the package hdr
1111 * @seg: pointer to the package segment hdr
1112 *
1113 * This function checks the package version compatibility with driver and NVM
1114 */
1115static enum ice_ddp_state
1116ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
1117		   struct ice_seg **seg)
1118{
1119	struct ice_aqc_get_pkg_info_resp *pkg;
1120	enum ice_ddp_state state;
1121	u16 size;
1122	u32 i;
1123
1124	/* Check package version compatibility */
1125	state = ice_chk_pkg_version(&hw->pkg_ver);
1126	if (state) {
1127		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1128		return state;
1129	}
1130
1131	/* find ICE segment in given package */
1132	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
1133						     ospkg);
1134	if (!*seg) {
1135		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1136		return ICE_DDP_PKG_INVALID_FILE;
1137	}
1138
1139	/* Check if FW is compatible with the OS package */
1140	size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
1141	pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
1142	if (!pkg)
1143		return ICE_DDP_PKG_ERR;
1144
1145	if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
1146		state = ICE_DDP_PKG_ERR;
1147		goto fw_ddp_compat_free_alloc;
1148	}
1149
1150	for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
1151		/* loop till we find the NVM package */
1152		if (!pkg->pkg_info[i].is_in_nvm)
1153			continue;
1154		if ((*seg)->hdr.seg_format_ver.major !=
1155			pkg->pkg_info[i].ver.major ||
1156		    (*seg)->hdr.seg_format_ver.minor >
1157			pkg->pkg_info[i].ver.minor) {
1158			state = ICE_DDP_PKG_FW_MISMATCH;
1159			ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
1160		}
1161		/* done processing NVM package so break */
1162		break;
1163	}
1164fw_ddp_compat_free_alloc:
1165	ice_free(hw, pkg);
1166	return state;
1167}
1168
1169/**
1170 * ice_sw_fv_handler
1171 * @sect_type: section type
1172 * @section: pointer to section
1173 * @index: index of the field vector entry to be returned
1174 * @offset: ptr to variable that receives the offset in the field vector table
1175 *
1176 * This is a callback function that can be passed to ice_pkg_enum_entry.
1177 * This function treats the given section as of type ice_sw_fv_section and
1178 * enumerates offset field. "offset" is an index into the field vector table.
1179 */
1180static void *
1181ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
1182{
1183	struct ice_sw_fv_section *fv_section =
1184		(struct ice_sw_fv_section *)section;
1185
1186	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
1187		return NULL;
1188	if (index >= LE16_TO_CPU(fv_section->count))
1189		return NULL;
1190	if (offset)
		/* "index" passed in to this function is relative to a given
		 * 4k block. To get the true index into the field vector
		 * table, add the relative index to the base_offset field of
		 * this section.
		 */
1196		*offset = LE16_TO_CPU(fv_section->base_offset) + index;
1197	return fv_section->fv + index;
1198}
1199
1200/**
1201 * ice_get_prof_index_max - get the max profile index for used profile
1202 * @hw: pointer to the HW struct
1203 *
 * Determines the maximum profile index among the profiles that are in use and
 * stores it in struct ice_switch_info *switch_info in hw for later use.
1207 */
static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
1209{
1210	u16 prof_index = 0, j, max_prof_index = 0;
1211	struct ice_pkg_enum state;
1212	struct ice_seg *ice_seg;
1213	bool flag = false;
1214	struct ice_fv *fv;
1215	u32 offset;
1216
1217	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1218
1219	if (!hw->seg)
1220		return ICE_ERR_PARAM;
1221
1222	ice_seg = hw->seg;
1223
1224	do {
1225		fv = (struct ice_fv *)
1226			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1227					   &offset, ice_sw_fv_handler);
1228		if (!fv)
1229			break;
1230		ice_seg = NULL;
1231
		/* In an unused profile, prot_id is set to 0xff and off is set
		 * to 0x1ff for all the field vector words.
		 */
1235		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1236			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
1237			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
1238				flag = true;
1239		if (flag && prof_index > max_prof_index)
1240			max_prof_index = prof_index;
1241
1242		prof_index++;
1243		flag = false;
1244	} while (fv);
1245
1246	hw->switch_info->max_used_prof_index = max_prof_index;
1247
1248	return ICE_SUCCESS;
1249}
1250
1251/**
1252 * ice_get_ddp_pkg_state - get DDP pkg state after download
1253 * @hw: pointer to the HW struct
1254 * @already_loaded: indicates if pkg was already loaded onto the device
1255 *
1256 */
1257static enum ice_ddp_state
1258ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
1259{
1260	if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
1261	    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
1262	    hw->pkg_ver.update == hw->active_pkg_ver.update &&
1263	    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
1264	    !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
1265		if (already_loaded)
1266			return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
1267		else
1268			return ICE_DDP_PKG_SUCCESS;
1269	} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
1270		   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
1271		return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
1272	} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
1273		   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
1274		return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
1275	} else {
1276		return ICE_DDP_PKG_ERR;
1277	}
1278}
1279
1280/**
1281 * ice_init_pkg_regs - initialize additional package registers
1282 * @hw: pointer to the hardware structure
1283 */
1284static void ice_init_pkg_regs(struct ice_hw *hw)
1285{
1286#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
1287#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
1288#define ICE_SW_BLK_IDX	0
1289
1290	/* setup Switch block input mask, which is 48-bits in two parts */
1291	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
1292	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
1293}
1294
1295/**
1296 * ice_init_pkg - initialize/download package
1297 * @hw: pointer to the hardware structure
1298 * @buf: pointer to the package buffer
1299 * @len: size of the package buffer
1300 *
1301 * This function initializes a package. The package contains HW tables
1302 * required to do packet processing. First, the function extracts package
1303 * information such as version. Then it finds the ice configuration segment
1304 * within the package; this function then saves a copy of the segment pointer
1305 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note that if
1307 * a previous PF driver has already downloaded the package successfully, then
1308 * the current driver will not have to download the package again.
1309 *
1310 * The local package contents will be used to query default behavior and to
1311 * update specific sections of the HW's version of the package (e.g. to update
1312 * the parse graph to understand new protocols).
1313 *
1314 * This function stores a pointer to the package buffer memory, and it is
1315 * expected that the supplied buffer will not be freed immediately. If the
1316 * package buffer needs to be freed, such as when read from a file, use
1317 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
1318 * case.
1319 */
1320enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1321{
1322	bool already_loaded = false;
1323	enum ice_ddp_state state;
1324	struct ice_pkg_hdr *pkg;
1325	struct ice_seg *seg;
1326
1327	if (!buf || !len)
1328		return ICE_DDP_PKG_ERR;
1329
1330	pkg = (struct ice_pkg_hdr *)buf;
1331	state = ice_verify_pkg(pkg, len);
1332	if (state) {
1333		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1334			  state);
1335		return state;
1336	}
1337
1338	/* initialize package info */
1339	state = ice_init_pkg_info(hw, pkg);
1340	if (state)
1341		return state;
1342
	/* For packages with signing segments, there must be a matching segment */
1344	if (hw->pkg_has_signing_seg)
1345		if (!ice_match_signing_seg(pkg, hw->pkg_seg_id,
1346					   hw->pkg_sign_type))
1347			return ICE_DDP_PKG_ERR;
1348
1349	/* before downloading the package, check package version for
1350	 * compatibility with driver
1351	 */
1352	state = ice_chk_pkg_compat(hw, pkg, &seg);
1353	if (state)
1354		return state;
1355
1356	/* initialize package hints and then download package */
1357	ice_init_pkg_hints(hw, seg);
1358	state = ice_download_pkg(hw, pkg, seg);
1359
1360	if (state == ICE_DDP_PKG_ALREADY_LOADED) {
1361		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
1362		already_loaded = true;
1363	}
1364
1365	/* Get information on the package currently loaded in HW, then make sure
1366	 * the driver is compatible with this version.
1367	 */
1368	if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
1369		state = ice_get_pkg_info(hw);
1370		if (!state)
1371			state = ice_get_ddp_pkg_state(hw, already_loaded);
1372	}
1373
1374	if (ice_is_init_pkg_successful(state)) {
1375		hw->seg = seg;
1376		/* on successful package download update other required
1377		 * registers to support the package and fill HW tables
1378		 * with package content.
1379		 */
1380		ice_init_pkg_regs(hw);
1381		ice_fill_blk_tbls(hw);
1382		ice_get_prof_index_max(hw);
1383	} else {
1384		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
1385			  state);
1386	}
1387
1388	return state;
1389}
1390
1391/**
1392 * ice_copy_and_init_pkg - initialize/download a copy of the package
1393 * @hw: pointer to the hardware structure
1394 * @buf: pointer to the package buffer
1395 * @len: size of the package buffer
1396 *
1397 * This function copies the package buffer, and then calls ice_init_pkg() to
1398 * initialize the copied package contents.
1399 *
1400 * The copying is necessary if the package buffer supplied is constant, or if
1401 * the memory may disappear shortly after calling this function.
1402 *
1403 * If the package buffer resides in the data segment and can be modified, the
1404 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
1405 *
1406 * However, if the package buffer needs to be copied first, such as when being
1407 * read from a file, the caller should use ice_copy_and_init_pkg().
1408 *
1409 * This function will first copy the package buffer, before calling
1410 * ice_init_pkg(). The caller is free to immediately destroy the original
1411 * package buffer, as the new copy will be managed by this function and
1412 * related routines.
1413 */
1414enum ice_ddp_state
1415ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
1416{
1417	enum ice_ddp_state state;
1418	u8 *buf_copy;
1419
1420	if (!buf || !len)
1421		return ICE_DDP_PKG_ERR;
1422
	buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
	if (!buf_copy)
		return ICE_DDP_PKG_ERR;
1424
1425	state = ice_init_pkg(hw, buf_copy, len);
1426	if (!ice_is_init_pkg_successful(state)) {
1427		/* Free the copy, since we failed to initialize the package */
1428		ice_free(hw, buf_copy);
1429	} else {
1430		/* Track the copied pkg so we can free it later */
1431		hw->pkg_copy = buf_copy;
1432		hw->pkg_size = len;
1433	}
1434
1435	return state;
1436}
1437
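/* Typical call-site sketch (illustrative; fw_data and fw_size stand in for
 * whatever buffer and length the OS firmware-loading mechanism returned):
 *
 *	enum ice_ddp_state state;
 *
 *	state = ice_copy_and_init_pkg(hw, fw_data, fw_size);
 *	if (!ice_is_init_pkg_successful(state))
 *		... enter safe mode or report the DDP load error ...
 */
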
1438/**
1439 * ice_is_init_pkg_successful - check if DDP init was successful
1440 * @state: state of the DDP pkg after download
1441 */
1442bool ice_is_init_pkg_successful(enum ice_ddp_state state)
1443{
1444	switch (state) {
1445	case ICE_DDP_PKG_SUCCESS:
1446	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
1447	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
1448		return true;
1449	default:
1450		return false;
1451	}
1452}
1453
1454/**
1455 * ice_pkg_buf_alloc
1456 * @hw: pointer to the HW structure
1457 *
1458 * Allocates a package buffer and returns a pointer to the buffer header.
1459 * Note: all package contents must be in Little Endian form.
1460 */
1461struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1462{
1463	struct ice_buf_build *bld;
1464	struct ice_buf_hdr *buf;
1465
1466	bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
1467	if (!bld)
1468		return NULL;
1469
1470	buf = (struct ice_buf_hdr *)bld;
1471	buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
1472					     section_entry));
1473	return bld;
1474}
1475
1476static bool ice_is_gtp_u_profile(u32 prof_idx)
1477{
1478	return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
1479		prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP) ||
1480	       prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
1481}
1482
1483static bool ice_is_gtp_c_profile(u32 prof_idx)
1484{
1485	switch (prof_idx) {
1486	case ICE_PROFID_IPV4_GTPC_TEID:
1487	case ICE_PROFID_IPV4_GTPC_NO_TEID:
1488	case ICE_PROFID_IPV6_GTPC_TEID:
1489	case ICE_PROFID_IPV6_GTPC_NO_TEID:
1490		return true;
1491	default:
1492		return false;
1493	}
1494}
1495
1496/**
1497 * ice_get_sw_prof_type - determine switch profile type
1498 * @hw: pointer to the HW structure
1499 * @fv: pointer to the switch field vector
1500 * @prof_idx: profile index to check
1501 */
1502static enum ice_prof_type
1503ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx)
1504{
1505	bool valid_prof = false;
1506	u16 i;
1507
1508	if (ice_is_gtp_c_profile(prof_idx))
1509		return ICE_PROF_TUN_GTPC;
1510
1511	if (ice_is_gtp_u_profile(prof_idx))
1512		return ICE_PROF_TUN_GTPU;
1513
1514	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
1515		if (fv->ew[i].off != ICE_NAN_OFFSET)
1516			valid_prof = true;
1517
1518		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
1519		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
1520		    fv->ew[i].off == ICE_VNI_OFFSET)
1521			return ICE_PROF_TUN_UDP;
1522
1523		/* GRE tunnel will have GRE protocol */
1524		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
1525			return ICE_PROF_TUN_GRE;
1526	}
1527
1528	return valid_prof ? ICE_PROF_NON_TUN : ICE_PROF_INVALID;
1529}
1530
1531/**
1532 * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
1533 * @hw: pointer to hardware structure
1534 * @req_profs: type of profiles requested
1535 * @bm: pointer to memory for returning the bitmap of field vectors
1536 */
1537void
1538ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
1539		     ice_bitmap_t *bm)
1540{
1541	struct ice_pkg_enum state;
1542	struct ice_seg *ice_seg;
1543	struct ice_fv *fv;
1544
1545	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1546	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
1547	ice_seg = hw->seg;
1548	do {
1549		enum ice_prof_type prof_type;
1550		u32 offset;
1551
1552		fv = (struct ice_fv *)
1553			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1554					   &offset, ice_sw_fv_handler);
1555		ice_seg = NULL;
1556
1557		if (fv) {
1558			/* Determine field vector type */
1559			prof_type = ice_get_sw_prof_type(hw, fv, offset);
1560
1561			if (req_profs & prof_type)
1562				ice_set_bit((u16)offset, bm);
1563		}
1564	} while (fv);
1565}
1566
1567/**
1568 * ice_get_sw_fv_list
1569 * @hw: pointer to the HW structure
1570 * @lkups: lookup elements or match criteria for the advanced recipe, one
1571 *	   structure per protocol header
1572 * @bm: bitmap of field vectors to consider
1573 * @fv_list: Head of a list
1574 *
1575 * Finds all the field vector entries from switch block that contain
1576 * a given protocol ID and offset and returns a list of structures of type
1577 * "ice_sw_fv_list_entry". Every structure in the list has a field vector
 * definition and profile ID information.
1579 * NOTE: The caller of the function is responsible for freeing the memory
1580 * allocated for every list entry.
1581 */
1582enum ice_status
1583ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
1584		   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
1585{
1586	struct ice_sw_fv_list_entry *fvl;
1587	struct ice_sw_fv_list_entry *tmp;
1588	struct ice_pkg_enum state;
1589	struct ice_seg *ice_seg;
1590	struct ice_fv *fv;
1591	u32 offset;
1592
1593	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1594
1595	if (!lkups->n_val_words || !hw->seg)
1596		return ICE_ERR_PARAM;
1597
1598	ice_seg = hw->seg;
1599	do {
1600		u16 i;
1601
1602		fv = (struct ice_fv *)
1603			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1604					   &offset, ice_sw_fv_handler);
1605		if (!fv)
1606			break;
1607		ice_seg = NULL;
1608
1609		/* If field vector is not in the bitmap list, then skip this
1610		 * profile.
1611		 */
1612		if (!ice_is_bit_set(bm, (u16)offset))
1613			continue;
1614
1615		for (i = 0; i < lkups->n_val_words; i++) {
1616			int j;
1617
1618			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1619				if (fv->ew[j].prot_id ==
1620				    lkups->fv_words[i].prot_id &&
1621				    fv->ew[j].off == lkups->fv_words[i].off)
1622					break;
1623			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
1624				break;
1625			if (i + 1 == lkups->n_val_words) {
1626				fvl = (struct ice_sw_fv_list_entry *)
1627					ice_malloc(hw, sizeof(*fvl));
1628				if (!fvl)
1629					goto err;
1630				fvl->fv_ptr = fv;
1631				fvl->profile_id = offset;
1632				LIST_ADD(&fvl->list_entry, fv_list);
1633				break;
1634			}
1635		}
1636	} while (fv);
1637	if (LIST_EMPTY(fv_list)) {
1638		ice_warn(hw, "Required profiles not found in currently loaded DDP package");
1639		return ICE_ERR_CFG;
1640	}
1641	return ICE_SUCCESS;
1642
1643err:
1644	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
1645				 list_entry) {
1646		LIST_DEL(&fvl->list_entry);
1647		ice_free(hw, fvl);
1648	}
1649
1650	return ICE_ERR_NO_MEMORY;
1651}
1652
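/* Calling-pattern sketch (illustrative; ice_declare_bitmap() and
 * INIT_LIST_HEAD() are assumed from the surrounding code base, and the
 * cleanup loop reflects the NOTE above about the caller freeing each entry):
 *
 *	struct ice_sw_fv_list_entry *fvl, *tmp;
 *	struct LIST_HEAD_TYPE fv_list;
 *	ice_declare_bitmap(bm, ICE_MAX_NUM_PROFILES);
 *
 *	INIT_LIST_HEAD(&fv_list);
 *	ice_get_sw_fv_bitmap(hw, ICE_PROF_NON_TUN, bm);
 *	if (!ice_get_sw_fv_list(hw, lkups, bm, &fv_list)) {
 *		... use the field vectors on fv_list ...
 *	}
 *	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, &fv_list, ice_sw_fv_list_entry,
 *				 list_entry) {
 *		LIST_DEL(&fvl->list_entry);
 *		ice_free(hw, fvl);
 *	}
 */
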
1653/**
1654 * ice_init_prof_result_bm - Initialize the profile result index bitmap
1655 * @hw: pointer to hardware structure
1656 */
1657void ice_init_prof_result_bm(struct ice_hw *hw)
1658{
1659	struct ice_pkg_enum state;
1660	struct ice_seg *ice_seg;
1661	struct ice_fv *fv;
1662
1663	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1664
1665	if (!hw->seg)
1666		return;
1667
1668	ice_seg = hw->seg;
1669	do {
1670		u32 off;
1671		u16 i;
1672
1673		fv = (struct ice_fv *)
1674			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1675					   &off, ice_sw_fv_handler);
1676		ice_seg = NULL;
1677		if (!fv)
1678			break;
1679
1680		ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
1681				ICE_MAX_FV_WORDS);
1682
1683		/* Determine empty field vector indices, these can be
1684		 * used for recipe results. Skip index 0, since it is
1685		 * always used for Switch ID.
1686		 */
1687		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
1688			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
1689			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
1690				ice_set_bit(i,
1691					    hw->switch_info->prof_res_bm[off]);
1692	} while (fv);
1693}
1694
1695/**
1696 * ice_pkg_buf_free
1697 * @hw: pointer to the HW structure
1698 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1699 *
1700 * Frees a package buffer
1701 */
1702void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1703{
1704	ice_free(hw, bld);
1705}
1706
1707/**
1708 * ice_pkg_buf_reserve_section
1709 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1710 * @count: the number of sections to reserve
1711 *
 * Reserves one or more section table entries in a package buffer. This routine
 * can be called multiple times as long as all calls are made before
 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
 * called, the number of sections that can be allocated can no longer be
 * increased; not using all reserved sections is fine, but this will result in
 * some wasted space in the buffer.
1718 * Note: all package contents must be in Little Endian form.
1719 */
1720enum ice_status
1721ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
1722{
1723	struct ice_buf_hdr *buf;
1724	u16 section_count;
1725	u16 data_end;
1726
1727	if (!bld)
1728		return ICE_ERR_PARAM;
1729
1730	buf = (struct ice_buf_hdr *)&bld->buf;
1731
1732	/* already an active section, can't increase table size */
1733	section_count = LE16_TO_CPU(buf->section_count);
1734	if (section_count > 0)
1735		return ICE_ERR_CFG;
1736
1737	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
1738		return ICE_ERR_CFG;
1739	bld->reserved_section_table_entries += count;
1740
1741	data_end = LE16_TO_CPU(buf->data_end) +
1742		FLEX_ARRAY_SIZE(buf, section_entry, count);
1743	buf->data_end = CPU_TO_LE16(data_end);
1744
1745	return ICE_SUCCESS;
1746}
1747
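/* Build-buffer workflow sketch (illustrative; sect_type and sect_size are
 * placeholders, and error handling is abbreviated). All sections must be
 * reserved before the first ice_pkg_buf_alloc_section() call:
 *
 *	struct ice_buf_build *bld = ice_pkg_buf_alloc(hw);
 *	void *s1, *s2;
 *
 *	if (!bld || ice_pkg_buf_reserve_section(bld, 2))
 *		goto err;
 *	s1 = ice_pkg_buf_alloc_section(bld, sect_type, sect_size);
 *	s2 = ice_pkg_buf_alloc_section(bld, sect_type, sect_size);
 *	... fill s1 and s2, then pass ice_pkg_buf(bld) to an update call ...
 *	ice_pkg_buf_free(hw, bld);
 */
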
1748/**
1749 * ice_pkg_buf_alloc_section
1750 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1751 * @type: the section type value
1752 * @size: the size of the section to reserve (in bytes)
1753 *
1754 * Reserves memory in the buffer for a section's content and updates the
 * buffer's status accordingly. This routine returns a pointer to the first
1756 * byte of the section start within the buffer, which is used to fill in the
1757 * section contents.
1758 * Note: all package contents must be in Little Endian form.
1759 */
1760void *
1761ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
1762{
1763	struct ice_buf_hdr *buf;
1764	u16 sect_count;
1765	u16 data_end;
1766
1767	if (!bld || !type || !size)
1768		return NULL;
1769
1770	buf = (struct ice_buf_hdr *)&bld->buf;
1771
1772	/* check for enough space left in buffer */
1773	data_end = LE16_TO_CPU(buf->data_end);
1774
1775	/* section start must align on 4 byte boundary */
1776	data_end = ICE_ALIGN(data_end, 4);
1777
1778	if ((data_end + size) > ICE_MAX_S_DATA_END)
1779		return NULL;
1780
1781	/* check for more available section table entries */
1782	sect_count = LE16_TO_CPU(buf->section_count);
1783	if (sect_count < bld->reserved_section_table_entries) {
1784		void *section_ptr = ((u8 *)buf) + data_end;
1785
1786		buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
1787		buf->section_entry[sect_count].size = CPU_TO_LE16(size);
1788		buf->section_entry[sect_count].type = CPU_TO_LE32(type);
1789
1790		data_end += size;
1791		buf->data_end = CPU_TO_LE16(data_end);
1792
1793		buf->section_count = CPU_TO_LE16(sect_count + 1);
1794		return section_ptr;
1795	}
1796
1797	/* no free section table entries */
1798	return NULL;
1799}
1800
1801/**
1802 * ice_pkg_buf_alloc_single_section
1803 * @hw: pointer to the HW structure
1804 * @type: the section type value
1805 * @size: the size of the section to reserve (in bytes)
1806 * @section: returns pointer to the section
1807 *
1808 * Allocates a package buffer with a single section.
1809 * Note: all package contents must be in Little Endian form.
1810 */
1811struct ice_buf_build *
1812ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
1813				 void **section)
1814{
1815	struct ice_buf_build *buf;
1816
1817	if (!section)
1818		return NULL;
1819
1820	buf = ice_pkg_buf_alloc(hw);
1821	if (!buf)
1822		return NULL;
1823
1824	if (ice_pkg_buf_reserve_section(buf, 1))
1825		goto ice_pkg_buf_alloc_single_section_err;
1826
1827	*section = ice_pkg_buf_alloc_section(buf, type, size);
1828	if (!*section)
1829		goto ice_pkg_buf_alloc_single_section_err;
1830
1831	return buf;
1832
1833ice_pkg_buf_alloc_single_section_err:
1834	ice_pkg_buf_free(hw, buf);
1835	return NULL;
1836}
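
/*
 * Usage sketch (illustrative only): building a single boost TCAM section with
 * the helper above. "sect_size" is a placeholder for the size the caller
 * computes for the section contents.
 *
 *	struct ice_boost_tcam_section *sect;
 *	struct ice_buf_build *bld;
 *
 *	bld = ice_pkg_buf_alloc_single_section(hw, ICE_SID_RXPARSER_BOOST_TCAM,
 *					       sect_size, (void **)&sect);
 *	if (!bld)
 *		return ICE_ERR_NO_MEMORY;
 *
 *	sect->count = CPU_TO_LE16(1);
 *	(fill in sect->tcam[0], then send the buffer and free it with
 *	 ice_pkg_buf_free())
 */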
1837
1838/**
1839 * ice_pkg_buf_unreserve_section
1840 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1841 * @count: the number of sections to unreserve
1842 *
1843 * Unreserves one or more section table entries in a package buffer, releasing
1844 * space that can be used for section data. This routine can be called
1845 * multiple times as long as every call is made before calling
1846 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
1847 * has been called, the number of reserved sections can no longer be
1848 * changed; not using all reserved sections is fine, but this will
1849 * result in some wasted space in the buffer.
1850 * Note: all package contents must be in Little Endian form.
1851 */
1852enum ice_status
1853ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
1854{
1855	struct ice_buf_hdr *buf;
1856	u16 section_count;
1857	u16 data_end;
1858
1859	if (!bld)
1860		return ICE_ERR_PARAM;
1861
1862	buf = (struct ice_buf_hdr *)&bld->buf;
1863
1864	/* already an active section, can't decrease table size */
1865	section_count = LE16_TO_CPU(buf->section_count);
1866	if (section_count > 0)
1867		return ICE_ERR_CFG;
1868
1869	if (count > bld->reserved_section_table_entries)
1870		return ICE_ERR_CFG;
1871	bld->reserved_section_table_entries -= count;
1872
1873	data_end = LE16_TO_CPU(buf->data_end) -
1874		FLEX_ARRAY_SIZE(buf, section_entry, count);
1875	buf->data_end = CPU_TO_LE16(data_end);
1876
1877	return ICE_SUCCESS;
1878}
1879
1880/**
1881 * ice_pkg_buf_get_free_space
1882 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1883 *
1884 * Returns the number of free bytes remaining in the buffer.
1885 * Note: all package contents must be in Little Endian form.
1886 */
1887u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
1888{
1889	struct ice_buf_hdr *buf;
1890
1891	if (!bld)
1892		return 0;
1893
1894	buf = (struct ice_buf_hdr *)&bld->buf;
1895	return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
1896}
1897
1898/**
1899 * ice_pkg_buf_get_active_sections
1900 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1901 *
1902 * Returns the number of active sections. Before using the package buffer
1903 * in an update package command, the caller should make sure that there is at
1904 * least one active section - otherwise, the buffer is not legal and should
1905 * not be used.
1906 * Note: all package contents must be in Little Endian form.
1907 */
1908u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
1909{
1910	struct ice_buf_hdr *buf;
1911
1912	if (!bld)
1913		return 0;
1914
1915	buf = (struct ice_buf_hdr *)&bld->buf;
1916	return LE16_TO_CPU(buf->section_count);
1917}
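
/*
 * Sketch of the check described above (illustrative only): before using a
 * build buffer in an update package command, make sure at least one section
 * was actually written.
 *
 *	if (!ice_pkg_buf_get_active_sections(bld)) {
 *		ice_debug(hw, ICE_DBG_PKG, "no sections in package buffer\n");
 *		ice_pkg_buf_free(hw, bld);
 *		return ICE_ERR_CFG;
 *	}
 */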
1918
1919/**
1920 * ice_pkg_buf
1921 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1922 *
1923 * Return a pointer to the buffer's header
1924 */
1925struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
1926{
1927	if (bld)
1928		return &bld->buf;
1929
1930	return NULL;
1931}
1932
1933/**
1934 * ice_find_buf_table
1935 * @ice_seg: pointer to the ice segment
1936 *
1937 * Returns the address of the buffer table within the ice segment.
1938 */
1939struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
1940{
1941	struct ice_nvm_table *nvms;
1942
1943	nvms = (struct ice_nvm_table *)
1944		(ice_seg->device_table +
1945		 LE32_TO_CPU(ice_seg->device_table_count));
1946
1947	return (_FORCE_ struct ice_buf_table *)
1948		(nvms->vers + LE32_TO_CPU(nvms->table_count));
1949}
1950
1951/**
1952 * ice_pkg_val_buf
1953 * @buf: pointer to the ice buffer
1954 *
1955 * This helper function validates a buffer's header.
1956 */
1957static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
1958{
1959	struct ice_buf_hdr *hdr;
1960	u16 section_count;
1961	u16 data_end;
1962
1963	hdr = (struct ice_buf_hdr *)buf->buf;
1964	/* verify data */
1965	section_count = LE16_TO_CPU(hdr->section_count);
1966	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
1967		return NULL;
1968
1969	data_end = LE16_TO_CPU(hdr->data_end);
1970	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
1971		return NULL;
1972
1973	return hdr;
1974}
1975
1976/**
1977 * ice_pkg_enum_buf
1978 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
1979 * @state: pointer to the enum state
1980 *
1981 * This function will enumerate all the buffers in the ice segment. The first
1982 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
1983 * ice_seg is set to NULL which continues the enumeration. When the function
1984 * returns a NULL pointer, then the end of the buffers has been reached, or an
1985 * unexpected value has been detected (for example an invalid section count or
1986 * an invalid buffer end value).
1987 */
1988struct ice_buf_hdr *
1989ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
1990{
1991	if (ice_seg) {
1992		state->buf_table = ice_find_buf_table(ice_seg);
1993		if (!state->buf_table)
1994			return NULL;
1995
1996		state->buf_idx = 0;
1997		return ice_pkg_val_buf(state->buf_table->buf_array);
1998	}
1999
2000	if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
2001		return ice_pkg_val_buf(state->buf_table->buf_array +
2002				       state->buf_idx);
2003	else
2004		return NULL;
2005}
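
/*
 * Sketch of the enumeration convention (illustrative only): the first call
 * passes the ice segment, every following call passes NULL, and a NULL
 * return ends the walk. "process_buf" is a placeholder for the caller's own
 * handling of each buffer.
 *
 *	struct ice_pkg_enum state;
 *	struct ice_buf_hdr *bh;
 *
 *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 *	for (bh = ice_pkg_enum_buf(ice_seg, &state); bh;
 *	     bh = ice_pkg_enum_buf(NULL, &state))
 *		process_buf(bh);
 */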
2006
2007/**
2008 * ice_pkg_advance_sect
2009 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
2010 * @state: pointer to the enum state
2011 *
2012 * This helper function will advance the section within the ice segment,
2013 * also advancing the buffer if needed.
2014 */
2015bool
2016ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
2017{
2018	if (!ice_seg && !state->buf)
2019		return false;
2020
2021	if (!ice_seg && state->buf)
2022		if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
2023			return true;
2024
2025	state->buf = ice_pkg_enum_buf(ice_seg, state);
2026	if (!state->buf)
2027		return false;
2028
2029	/* start of new buffer, reset section index */
2030	state->sect_idx = 0;
2031	return true;
2032}
2033
2034/**
2035 * ice_pkg_enum_section
2036 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
2037 * @state: pointer to the enum state
2038 * @sect_type: section type to enumerate
2039 *
2040 * This function will enumerate all the sections of a particular type in the
2041 * ice segment. The first call is made with the ice_seg parameter non-NULL;
2042 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
2043 * When the function returns a NULL pointer, then the end of the matching
2044 * sections has been reached.
2045 */
2046void *
2047ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
2048		     u32 sect_type)
2049{
2050	u16 offset, size;
2051
2052	if (ice_seg)
2053		state->type = sect_type;
2054
2055	if (!ice_pkg_advance_sect(ice_seg, state))
2056		return NULL;
2057
2058	/* scan for next matching section */
2059	while (state->buf->section_entry[state->sect_idx].type !=
2060	       CPU_TO_LE32(state->type))
2061		if (!ice_pkg_advance_sect(NULL, state))
2062			return NULL;
2063
2064	/* validate section */
2065	offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
2066	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
2067		return NULL;
2068
2069	size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
2070	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
2071		return NULL;
2072
2073	/* make sure the section fits in the buffer */
2074	if (offset + size > ICE_PKG_BUF_SIZE)
2075		return NULL;
2076
2077	state->sect_type =
2078		LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
2079
2080	/* calc pointer to this section */
2081	state->sect = ((u8 *)state->buf) +
2082		LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
2083
2084	return state->sect;
2085}
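
/*
 * Sketch of section enumeration (illustrative only), using the same
 * first-call/subsequent-call convention as ice_pkg_enum_buf(). The section
 * type only needs to be passed on the first call; it is ignored once ice_seg
 * is NULL.
 *
 *	struct ice_pkg_enum state;
 *	void *sect;
 *
 *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 *	sect = ice_pkg_enum_section(ice_seg, &state,
 *				    ICE_SID_RXPARSER_BOOST_TCAM);
 *	while (sect) {
 *		(process the section)
 *		sect = ice_pkg_enum_section(NULL, &state, 0);
 *	}
 */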
2086
2087/**
2088 * ice_pkg_enum_entry
2089 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
2090 * @state: pointer to the enum state
2091 * @sect_type: section type to enumerate
2092 * @offset: pointer to variable that receives the offset in the table (optional)
2093 * @handler: function that handles access to the entries of the given section type
2094 *
2095 * This function will enumerate all the entries in a particular section type in
2096 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
2097 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
2098 * When the function returns a NULL pointer, then the end of the entries has
2099 * been reached.
2100 *
2101 * Since each section may have a different header and entry size, the handler
2102 * function is needed to determine the number and location of entries in each
2103 * section.
2104 *
2105 * The offset parameter is optional, but should be used for sections that
2106 * contain an offset for each section table. For such cases, the section
2107 * handler function must add the entry index to the section's base offset to
2108 * give the absolute offset for each entry. For example, if the section's
2109 * header indicates a base offset of 10, and the index for the entry is 2,
2110 * then the section handler function should set the offset to 10 + 2 = 12.
2111 */
2112void *
2113ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
2114		   u32 sect_type, u32 *offset,
2115		   void *(*handler)(u32 sect_type, void *section,
2116				    u32 index, u32 *offset))
2117{
2118	void *entry;
2119
2120	if (ice_seg) {
2121		if (!handler)
2122			return NULL;
2123
2124		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
2125			return NULL;
2126
2127		state->entry_idx = 0;
2128		state->handler = handler;
2129	} else {
2130		state->entry_idx++;
2131	}
2132
2133	if (!state->handler)
2134		return NULL;
2135
2136	/* get entry */
2137	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
2138			       offset);
2139	if (!entry) {
2140		/* end of a section, look for another section of this type */
2141		if (!ice_pkg_enum_section(NULL, state, 0))
2142			return NULL;
2143
2144		state->entry_idx = 0;
2145		entry = state->handler(state->sect_type, state->sect,
2146				       state->entry_idx, offset);
2147	}
2148
2149	return entry;
2150}
2151
2152/**
2153 * ice_boost_tcam_handler
2154 * @sect_type: section type
2155 * @section: pointer to section
2156 * @index: index of the boost TCAM entry to be returned
2157 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
2158 *
2159 * This is a callback function that can be passed to ice_pkg_enum_entry.
2160 * Handles enumeration of individual boost TCAM entries.
2161 */
2162static void *
2163ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
2164{
2165	struct ice_boost_tcam_section *boost;
2166
2167	if (!section)
2168		return NULL;
2169
2170	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
2171		return NULL;
2172
2173	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
2174		return NULL;
2175
2176	if (offset)
2177		*offset = 0;
2178
2179	boost = (struct ice_boost_tcam_section *)section;
2180	if (index >= LE16_TO_CPU(boost->count))
2181		return NULL;
2182
2183	return boost->tcam + index;
2184}
2185
2186/**
2187 * ice_find_boost_entry
2188 * @ice_seg: pointer to the ice segment (non-NULL)
2189 * @addr: Boost TCAM address of entry to search for
2190 * @entry: returns pointer to the entry
2191 *
2192 * Finds a particular Boost TCAM entry and returns a pointer to that entry
2193 * if it is found. The ice_seg parameter must not be NULL since the first call
2194 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
2195 */
2196static enum ice_status
2197ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
2198		     struct ice_boost_tcam_entry **entry)
2199{
2200	struct ice_boost_tcam_entry *tcam;
2201	struct ice_pkg_enum state;
2202
2203	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
2204
2205	if (!ice_seg)
2206		return ICE_ERR_PARAM;
2207
2208	do {
2209		tcam = (struct ice_boost_tcam_entry *)
2210		       ice_pkg_enum_entry(ice_seg, &state,
2211					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
2212					  ice_boost_tcam_handler);
2213		if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
2214			*entry = tcam;
2215			return ICE_SUCCESS;
2216		}
2217
2218		ice_seg = NULL;
2219	} while (tcam);
2220
2221	*entry = NULL;
2222	return ICE_ERR_CFG;
2223}
2224
2225/**
2226 * ice_init_pkg_hints
2227 * @hw: pointer to the HW structure
2228 * @ice_seg: pointer to the segment of the package scan (non-NULL)
2229 *
2230 * This function will scan the package and save off relevant information
2231 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
2232 * since the first call to ice_enum_labels requires a pointer to an actual
2233 * ice_seg structure.
2234 */
2235void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
2236{
2237	struct ice_pkg_enum state;
2238	char *label_name;
2239	u16 val;
2240	int i;
2241
2242	ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
2243	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
2244
2245	if (!ice_seg)
2246		return;
2247
2248	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
2249				     &val);
2250
2251	while (label_name) {
2252/* TODO: Replace !strncmp() with wrappers like match_some_pre() */
2253		if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
2254			/* check for a tunnel entry */
2255			ice_add_tunnel_hint(hw, label_name, val);
2256
2257		label_name = ice_enum_labels(NULL, 0, &state, &val);
2258	}
2259
2260	/* Cache the appropriate boost TCAM entry pointers for tunnels */
2261	for (i = 0; i < hw->tnl.count; i++) {
2262		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
2263				     &hw->tnl.tbl[i].boost_entry);
2264		if (hw->tnl.tbl[i].boost_entry)
2265			hw->tnl.tbl[i].valid = true;
2266	}
2267}
2268
2269/**
2270 * ice_acquire_global_cfg_lock
2271 * @hw: pointer to the HW structure
2272 * @access: access type (read or write)
2273 *
2274 * This function will request ownership of the global config lock for reading
2275 * or writing of the package. When attempting to obtain write access, the
2276 * caller must check for the following two return values:
2277 *
2278 * ICE_SUCCESS        - Means the caller has acquired the global config lock
2279 *                      and can perform writing of the package.
2280 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
2281 *                      package or has found that no update was necessary; in
2282 *                      this case, the caller can just skip performing any
2283 *                      update of the package.
2284 */
2285enum ice_status
2286ice_acquire_global_cfg_lock(struct ice_hw *hw,
2287			    enum ice_aq_res_access_type access)
2288{
2289	enum ice_status status;
2290
2291	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
2292				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
2293
2294	if (status == ICE_ERR_AQ_NO_WORK)
2295		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
2296
2297	return status;
2298}
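
/*
 * Sketch of the calling convention described above (illustrative only):
 *
 *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 *	if (status == ICE_ERR_AQ_NO_WORK) {
 *		(another PF already downloaded the package or no update was
 *		 needed, so skip the package write entirely)
 *	} else if (!status) {
 *		(download or update the package)
 *		ice_release_global_cfg_lock(hw);
 *	}
 */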
2299
2300/**
2301 * ice_release_global_cfg_lock
2302 * @hw: pointer to the HW structure
2303 *
2304 * This function will release the global config lock.
2305 */
2306void ice_release_global_cfg_lock(struct ice_hw *hw)
2307{
2308	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
2309}
2310
2311/**
2312 * ice_acquire_change_lock
2313 * @hw: pointer to the HW structure
2314 * @access: access type (read or write)
2315 *
2316 * This function will request ownership of the change lock.
2317 */
2318enum ice_status
2319ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
2320{
2321	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
2322			       ICE_CHANGE_LOCK_TIMEOUT);
2323}
2324
2325/**
2326 * ice_release_change_lock
2327 * @hw: pointer to the HW structure
2328 *
2329 * This function will release the change lock using the proper Admin Command.
2330 */
2331void ice_release_change_lock(struct ice_hw *hw)
2332{
2333	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
2334}
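
/*
 * Sketch of the change lock pattern (illustrative only): the change lock is
 * typically held around package update commands, mirroring the global config
 * lock usage above.
 *
 *	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
 *	if (status)
 *		return status;
 *	(issue the update package commands)
 *	ice_release_change_lock(hw);
 */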
2335
2336/**
2337 * ice_get_set_tx_topo - get or set Tx topology
2338 * @hw: pointer to the HW struct
2339 * @buf: pointer to Tx topology buffer
2340 * @buf_size: buffer size
2341 * @cd: pointer to command details structure or NULL
2342 * @flags: pointer to descriptor flags
2343 * @set: true to set the topology, false to get it
2344 *
2345 * The function will get or set the Tx topology.
2346 */
2347static enum ice_status
2348ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
2349		    struct ice_sq_cd *cd, u8 *flags, bool set)
2350{
2351	struct ice_aqc_get_set_tx_topo *cmd;
2352	struct ice_aq_desc desc;
2353	enum ice_status status;
2354
2355	cmd = &desc.params.get_set_tx_topo;
2356	if (set) {
2357		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
2358		cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
2359		/* requested to update a new topology, not a default topology */
2360		if (buf)
2361			cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
2362					  ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
2363	} else {
2364		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
2365		cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
2366	}
2367	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2368	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2369	if (status)
2370		return status;
2371	/* read the return flag values (first byte) for get operation */
2372	if (!set && flags)
2373		*flags = desc.params.get_set_tx_topo.set_flags;
2374
2375	return ICE_SUCCESS;
2376}
2377
2378/**
2379 * ice_cfg_tx_topo - Initialize new tx topology if available
2380 * @hw: pointer to the HW struct
2381 * @buf: pointer to Tx topology buffer
2382 * @len: buffer size
2383 *
2384 * The function will apply the new Tx topology from the package buffer
2385 * if available.
2386 */
2387enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
2388{
2389	u8 *current_topo, *new_topo = NULL;
2390	struct ice_run_time_cfg_seg *seg;
2391	struct ice_buf_hdr *section;
2392	struct ice_pkg_hdr *pkg_hdr;
2393	enum ice_ddp_state state;
2394	u16 i, size = 0, offset;
2395	enum ice_status status;
2396	u32 reg = 0;
2397	u8 flags;
2398
2399	if (!buf || !len)
2400		return ICE_ERR_PARAM;
2401
2402	/* Does FW support the new Tx topology mode? */
2403	if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
2404		ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
2405		return ICE_ERR_NOT_SUPPORTED;
2406	}
2407
2408	current_topo = (u8 *)ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2409	if (!current_topo)
2410		return ICE_ERR_NO_MEMORY;
2411
2412	/* get the current Tx topology */
2413	status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
2414				     &flags, false);
2415	ice_free(hw, current_topo);
2416
2417	if (status) {
2418		ice_debug(hw, ICE_DBG_INIT, "Failed to get current Tx topology\n");
2419		return status;
2420	}
2421
2422	/* Is the default topology already applied? */
2423	if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
2424	    hw->num_tx_sched_layers == 9) {
2425		ice_debug(hw, ICE_DBG_INIT, "Default topology already loaded\n");
2426		/* The default topology is already loaded */
2427		return ICE_ERR_ALREADY_EXISTS;
2428	}
2429
2430	/* Is the new topology already applied? */
2431	if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
2432	    hw->num_tx_sched_layers == 5) {
2433		ice_debug(hw, ICE_DBG_INIT, "New topology already loaded\n");
2434		/* The new topology is already loaded */
2435		return ICE_ERR_ALREADY_EXISTS;
2436	}
2437
2438	/* Has a set topology request already been issued? */
2439	if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
2440		ice_debug(hw, ICE_DBG_INIT, "Tx topology update was already issued by another PF\n");
2441		/* add a small delay before exiting */
2442		for (i = 0; i < 20; i++)
2443			ice_msec_delay(100, true);
2444		return ICE_ERR_ALREADY_EXISTS;
2445	}
2446
2447	/* Change the topology from new to default (5 to 9) */
2448	if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
2449	    hw->num_tx_sched_layers == 5) {
2450		ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
2451		goto update_topo;
2452	}
2453
2454	pkg_hdr = (struct ice_pkg_hdr *)buf;
2455	state = ice_verify_pkg(pkg_hdr, len);
2456	if (state) {
2457		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
2458			  state);
2459		return ICE_ERR_CFG;
2460	}
2461
2462	/* find run time configuration segment */
2463	seg = (struct ice_run_time_cfg_seg *)
2464		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
2465	if (!seg) {
2466		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
2467		return ICE_ERR_CFG;
2468	}
2469
2470	if (LE32_TO_CPU(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
2471		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
2472			  seg->buf_table.buf_count);
2473		return ICE_ERR_CFG;
2474	}
2475
2476	section = ice_pkg_val_buf(seg->buf_table.buf_array);
2477
2478	if (!section || LE32_TO_CPU(section->section_entry[0].type) !=
2479		ICE_SID_TX_5_LAYER_TOPO) {
2480		ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
2481		return ICE_ERR_CFG;
2482	}
2483
2484	size = LE16_TO_CPU(section->section_entry[0].size);
2485	offset = LE16_TO_CPU(section->section_entry[0].offset);
2486	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
2487		ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
2488		return ICE_ERR_CFG;
2489	}
2490
2491	/* make sure the section fits in the buffer */
2492	if (offset + size > ICE_PKG_BUF_SIZE) {
2493		ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
2494		return ICE_ERR_CFG;
2495	}
2496
2497	/* Get the new topology buffer */
2498	new_topo = ((u8 *)section) + offset;
2499
2500update_topo:
2501	/* acquire the global config lock to make sure that set topology is
2502	 * issued by only one PF
2503	 */
2504	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
2505				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
2506	if (status) {
2507		ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
2508		return status;
2509	}
2510
2511	/* check reset was triggered already or not */
2512	reg = rd32(hw, GLGEN_RSTAT);
2513	if (reg & GLGEN_RSTAT_DEVSTATE_M) {
2514		/* Reset is in progress; the HW will be re-initialized */
2515		ice_debug(hw, ICE_DBG_INIT, "Reset is in progress, the layer topology might already be applied\n");
2516		ice_check_reset(hw);
2517		return ICE_SUCCESS;
2518	}
2519
2520	/* set new topology */
2521	status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
2522	if (status) {
2523		ice_debug(hw, ICE_DBG_INIT, "Failed to set Tx topology\n");
2524		return status;
2525	}
2526
2527	/* new topology is applied; delay 1 second before issuing the CORER */
2528	for (i = 0; i < 10; i++)
2529		ice_msec_delay(100, true);
2530	ice_reset(hw, ICE_RESET_CORER);
2531	/* CORER will clear the global lock, so no explicit call
2532	 * required for release
2533	 */
2534	return ICE_SUCCESS;
2535}
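
/*
 * Sketch of how a caller might consume ice_cfg_tx_topo() (illustrative only;
 * "ddp_buf" and "ddp_len" are placeholders for wherever the driver keeps the
 * DDP package image):
 *
 *	status = ice_cfg_tx_topo(hw, ddp_buf, ddp_len);
 *	if (status == ICE_ERR_ALREADY_EXISTS)
 *		(the requested topology is already in place, nothing to do)
 *	else if (status == ICE_ERR_NOT_SUPPORTED)
 *		(FW does not support the Tx topology compatibility mode)
 *	else if (status)
 *		(the topology was not changed; continue with the current one)
 *	else
 *		(a CORER was issued, so the HW must be re-initialized)
 */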
2536