/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/dev/qlnx/qlnxe/ecore_sriov.h 337519 2018-08-09 01:39:47Z davidcs $
 *
 */

#ifndef __ECORE_SRIOV_H__
#define __ECORE_SRIOV_H__

#include "ecore_status.h"
#include "ecore_vfpf_if.h"
#include "ecore_iov_api.h"
#include "ecore_hsi_common.h"
#include "ecore_l2.h"

#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
	(MAX_NUM_VFS_E4 * ECORE_ETH_VF_NUM_VLAN_FILTERS)

/* Represents a full message: both the request filled by the VF and the
 * response filled by the PF. The VF needs one copy of this message; it
 * fills the request part and sends it to the PF, and the PF copies its
 * response into the response part for the VF to read later. The PF holds
 * one such message per VF: the request received from the VF is placed in
 * the request part, and the response is filled in by the PF before being
 * sent back to the VF.
 */
struct ecore_vf_mbx_msg {
	union vfpf_tlvs req;
	union pfvf_tlvs resp;
};
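/* Illustrative sketch, not part of the API: the VF fills 'req' before
 * signalling the PF, and the PF fills 'resp' for the VF to read back later.
 * A hypothetical PF-side snippet saving the request header might look like
 * the following; it assumes the 'first_tlv' member of union vfpf_tlvs
 * declared in ecore_vfpf_if.h:
 *
 *	struct ecore_vf_mbx_msg *p_msg = <per-VF copy held by the PF>;
 *	struct vfpf_first_tlv hdr = p_msg->req.first_tlv;
 */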

/* This mailbox is maintained per VF in its PF and contains all the
 * information required for sending / receiving a message.
 */
struct ecore_iov_vf_mbx {
	union vfpf_tlvs		*req_virt;
	dma_addr_t		req_phys;
	union pfvf_tlvs		*reply_virt;
	dma_addr_t		reply_phys;

	/* Address in VF where a pending message is located */
	dma_addr_t		pending_req;

	/* Message from VF awaits handling */
	bool			b_pending_msg;

	u8 *offset;

#ifdef CONFIG_ECORE_SW_CHANNEL
	struct ecore_iov_sw_mbx sw_mbx;
#endif

	/* VF GPA address */
	u32			vf_addr_lo;
	u32			vf_addr_hi;

	struct vfpf_first_tlv	first_tlv;	/* saved VF request header */

	u8			flags;
#define VF_MSG_INPROCESS	0x1	/* failsafe - the FW should prevent
					 * more than one pending msg
					 */
};

#define ECORE_IOV_LEGACY_QID_RX (0)
#define ECORE_IOV_LEGACY_QID_TX (1)
#define ECORE_IOV_QID_INVALID (0xFE)

struct ecore_vf_queue_cid {
	bool b_is_tx;
	struct ecore_queue_cid *p_cid;
};

/* Describes a qzone associated with the VF */
struct ecore_vf_queue {
	/* Input from upper-layer, mapping relative queue to queue-zone */
	u16 fw_rx_qid;
	u16 fw_tx_qid;

	struct ecore_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};

enum vf_state {
	VF_FREE		= 0,	/* VF ready to be acquired; holds no resources */
	VF_ACQUIRED	= 1,	/* VF, acquired, but not initialized */
	VF_ENABLED	= 2,	/* VF, Enabled */
	VF_RESET	= 3,	/* VF, FLR'd, pending cleanup */
	VF_STOPPED      = 4     /* VF, Stopped */
};

struct ecore_vf_vlan_shadow {
	bool used;
	u16 vid;
};

struct ecore_vf_shadow_config {
	/* Shadow copy of all guest vlans */
	struct ecore_vf_vlan_shadow vlans[ECORE_ETH_VF_NUM_VLAN_FILTERS + 1];

	/* Shadow copy of all configured MACs; Empty if forcing MACs */
	u8 macs[ECORE_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
	u8 inner_vlan_removal;
};

/* PFs maintain an array of this structure, per VF */
struct ecore_vf_info {
	struct ecore_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;
	bool b_malicious;
	u8			to_disable;

	struct ecore_bulletin	bulletin;
	dma_addr_t		vf_bulletin;

#ifdef CONFIG_ECORE_SW_CHANNEL
	/* Whether the PF communicates with the VF via the HW or SW channel */
	bool	b_hw_channel;
#endif

	/* PF saves a copy of the last VF acquire message */
	struct vfpf_acquire_tlv acquire;

	u32			concrete_fid;
	u16			opaque_fid;
	u16			mtu;

	u8			vport_id;
	u8			rss_eng_id;
	u8			relative_vf_id;
	u8			abs_vf_id;
#define ECORE_VF_ABS_ID(p_hwfn, p_vf)	(ECORE_PATH_ID(p_hwfn) ? \
					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
					 (p_vf)->abs_vf_id)
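	/* Worked example for ECORE_VF_ABS_ID (illustrative, assuming
	 * MAX_NUM_VFS_BB == 120): a VF with abs_vf_id 5 resolves to 5 when
	 * the PF sits on path/engine 0 and to 125 on path/engine 1, since the
	 * second engine's VFs are numbered after the first engine's range.
	 */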

	u8			vport_instance; /* Number of active vports */
	u8			num_rxqs;
	u8			num_txqs;

	u16			rx_coal;
	u16			tx_coal;

	u8			num_sbs;

	u8			num_mac_filters;
	u8			num_vlan_filters;

	struct ecore_vf_queue	vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
	u16			igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];

	/* TODO - Only windows is using it - should be removed */
	u8 was_malicious;
	u8 num_active_rxqs;
	void *ctx;
	struct ecore_public_vf_info p_vf_info;
	bool spoof_chk;		/* Currently configured on HW */
	bool req_spoofchk_val;  /* Requested value */

	/* Stores the configuration requested by VF */
	struct ecore_vf_shadow_config shadow_config;

	/* A bitfield using bulletin's valid-map bits, used to indicate
	 * which of the bulletin board features have been configured.
	 */
	u64 configured_features;
#define ECORE_IOV_CONFIGURED_FEATURES_MASK	((1 << MAC_ADDR_FORCED) | \
						 (1 << VLAN_ADDR_FORCED))
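	/* A hypothetical check of this bitfield for a forced-MAC setup,
	 * shown only as an example of how the valid-map bits are used:
	 *
	 *	if (p_vf->configured_features & (1 << MAC_ADDR_FORCED))
	 *		... a MAC has already been forced for this VF ...
	 */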
};

/* This structure is part of ecore_hwfn and used only for PFs that have sriov
 * capability enabled.
 */
struct ecore_pf_iov {
	struct ecore_vf_info	vfs_array[MAX_NUM_VFS_E4];
	u64			pending_flr[ECORE_VF_ARRAY_LENGTH];

#ifndef REMOVE_DBG
	/* This doesn't serve anything functionally, but it makes windows
	 * debugging of IOV related issues easier.
	 */
	u64			active_vfs[ECORE_VF_ARRAY_LENGTH];
#endif

	/* Message buffers are allocated contiguously and split among the VFs */
	void			*mbx_msg_virt_addr;
	dma_addr_t		mbx_msg_phys_addr;
	u32			mbx_msg_size;
	void			*mbx_reply_virt_addr;
	dma_addr_t		mbx_reply_phys_addr;
	u32			mbx_reply_size;
	void			*p_bulletins;
	dma_addr_t		bulletins_phys;
	u32			bulletins_size;
};

#ifdef CONFIG_ECORE_SRIOV
/**
 * @brief Read SRIOV-related information and allocate resources;
 *  reads from configuration space, shmem, etc.
 *
 * @param p_hwfn
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
 *
 * @param offset
 * @param type
 * @param length
 *
 * @return pointer to the newly placed tlv
 */
void *ecore_add_tlv(u8 **offset, u16 type, u16 length);
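/* Illustrative usage sketch, not a prescribed calling sequence; it assumes
 * the CHANNEL_TLV_* values and struct channel_list_end_tlv from
 * ecore_vfpf_if.h, and a PF-side mailbox 'p_mbx':
 *
 *	u8 *p_offset = (u8 *)p_mbx->reply_virt;
 *	struct pfvf_def_resp_tlv *p_resp;
 *
 *	p_resp = ecore_add_tlv(&p_offset, CHANNEL_TLV_VPORT_START,
 *			       sizeof(*p_resp));
 *	ecore_add_tlv(&p_offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 */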

/**
 * @brief list the types and lengths of the tlvs on the buffer
 *
 * @param p_hwfn
 * @param tlvs_list
 */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,
		       void *tlvs_list);

/**
 * @brief ecore_iov_alloc - allocate sriov related resources
 *
 * @param p_hwfn
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_iov_setup - setup sriov related resources
 *
 * @param p_hwfn
 */
void ecore_iov_setup(struct ecore_hwfn	*p_hwfn);

/**
 * @brief ecore_iov_free - free sriov related resources
 *
 * @param p_hwfn
 */
void ecore_iov_free(struct ecore_hwfn *p_hwfn);

/**
 * @brief free sriov related memory that was allocated during hw_prepare
 *
 * @param p_dev
 */
void ecore_iov_free_hw_info(struct ecore_dev *p_dev);

/**
 * @brief Mark structs of vfs that have been FLR-ed.
 *
 * @param p_hwfn
 * @param disabled_vfs - bitmask of all VFs on path that were FLRed
 *
 * @return true if at least one of the PF's VFs was FLRed, false otherwise.
 */
bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
			  u32 *disabled_vfs);

/**
 * @brief Search extended TLVs in request/reply buffer.
 *
 * @param p_hwfn
 * @param p_tlvs_list - Pointer to tlvs list
 * @param req_type - Type of TLV
 *
 * @return pointer to tlv type if found, otherwise returns NULL.
 */
void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
				 void *p_tlvs_list, u16 req_type);

/**
 * @brief ecore_iov_get_vf_info - return the database of a
 *        specific VF
 *
 * @param p_hwfn
 * @param relative_vf_id - relative id of the VF for which info
 *			 is requested
 * @param b_enabled_only - false if the caller wants to access the VF
 *			 even if it is disabled
 *
 * @return struct ecore_vf_info*
 */
struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
					    u16 relative_vf_id,
					    bool b_enabled_only);
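/* Illustrative usage sketch (hypothetical caller): a NULL return means the
 * relative VF id is out of range or, when b_enabled_only is set, that the
 * VF is not currently enabled:
 *
 *	struct ecore_vf_info *p_vf;
 *
 *	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 *	if (p_vf == OSAL_NULL)
 *		return;
 */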
#else
static OSAL_INLINE enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void *ecore_add_tlv(u8 OSAL_UNUSED **offset, OSAL_UNUSED u16 type, OSAL_UNUSED u16 length) {return OSAL_NULL;}
static OSAL_INLINE void ecore_dp_tlv_list(struct ecore_hwfn OSAL_UNUSED *p_hwfn, void OSAL_UNUSED *tlvs_list) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void ecore_iov_setup(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
static OSAL_INLINE void ecore_iov_free(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
static OSAL_INLINE void ecore_iov_free_hw_info(struct ecore_dev OSAL_UNUSED *p_dev) {}
static OSAL_INLINE u32 ecore_crc32(u32 OSAL_UNUSED crc, u8 OSAL_UNUSED *ptr, u32 OSAL_UNUSED length) {return 0;}
static OSAL_INLINE bool ecore_iov_mark_vf_flr(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u32 OSAL_UNUSED *disabled_vfs) {return false;}
static OSAL_INLINE void *ecore_iov_search_list_tlvs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, void OSAL_UNUSED *p_tlvs_list, u16 OSAL_UNUSED req_type) {return OSAL_NULL;}
static OSAL_INLINE struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED relative_vf_id, bool OSAL_UNUSED b_enabled_only) {return OSAL_NULL;}

#endif
#endif /* __ECORE_SRIOV_H__ */