/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
#include "en/tc/sample.h"

enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
	MLX5_MAPPED_OBJ_INT_PORT_METADATA,
	MLX5_MAPPED_OBJ_ACT_MISS,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;
		u64 act_miss_cookie;
		struct {
			u32 group_id;
			u32 rate;
			u32 trunc_size;
			u32 tunnel_id;
		} sample;
		u32 int_port_metadata;
	};
};

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))
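
/* Illustrative note (not in the original header): both limits expand log2
 * capability fields, so a hypothetical device reporting
 * log_max_current_uc_list == 7 yields MLX5_MAX_UC_PER_VPORT(dev) == 128
 * unicast L2 addresses per vport.
 */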

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
	MAPPING_TYPE_INT_PORT,
};

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE that does internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group for the default match-all FTE that tags ingress
		 * packets with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		/* Optional group for a drop-all rule */
		struct mlx5_flow_group *drop_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
		struct mlx5_flow_handle *drop_rule;
	} offloads;
};

enum vport_egress_acl_type {
	VPORT_EGRESS_ACL_TYPE_DEFAULT,
	VPORT_EGRESS_ACL_TYPE_SHARED_FDB,
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	enum vport_egress_acl_type type;
	struct mlx5_flow_handle  *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct xarray bounce_rules;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8                      mac[ETH_ALEN];
	u16                     vlan;
	u64                     node_guid;
	int                     link_state;
	u8                      qos;
	u8                      spoofchk: 1;
	u8                      trusted: 1;
	u8                      roce_enabled: 1;
	u8                      mig_enabled: 1;
	u8                      ipsec_crypto_enabled: 1;
	u8                      ipsec_packet_enabled: 1;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport;

struct mlx5_devlink_port {
	struct devlink_port dl_port;
	struct mlx5_vport *vport;
};

static inline void mlx5_devlink_port_init(struct mlx5_devlink_port *dl_port,
					  struct mlx5_vport *vport)
{
	dl_port->vport = vport;
}

static inline struct mlx5_devlink_port *mlx5_devlink_port_get(struct devlink_port *dl_port)
{
	return container_of(dl_port, struct mlx5_devlink_port, dl_port);
}

static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port *dl_port)
{
	return mlx5_devlink_port_get(dl_port)->vport;
}
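
/* Usage sketch (hypothetical caller, not part of this header): devlink port
 * ops receive the embedded struct devlink_port, and the owning vport is
 * recovered via container_of() through the helpers above:
 *
 *	static int example_port_fn_state_get(struct devlink_port *port)
 *	{
 *		struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
 *
 *		return vport ? vport->info.link_state : -ENODEV;
 *	}
 */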

struct mlx5_vport {
	struct mlx5_core_dev    *dev;
	struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct      vport_change_handler;

	struct vport_ingress    ingress;
	struct vport_egress     egress;
	u32                     default_metadata;
	u32                     metadata;

	struct mlx5_vport_info  info;

	struct {
		bool            enabled;
		u32             esw_tsar_ix;
		u32             bw_share;
		u32 min_rate;
		u32 max_rate;
		struct mlx5_esw_rate_group *group;
	} qos;

	u16 vport;
	bool                    enabled;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct mlx5_devlink_port *dl_port;
};

struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules[MLX5_MAX_PORTS];
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_flow_group *vport_rx_drop_group;
	struct mlx5_flow_handle *vport_rx_drop_rule;
	struct mlx5_flow_table *ft_ipsec_tx_pol;
	struct xarray vport_reps;
	struct list_head peer_flows[MLX5_MAX_PORTS];
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	u64 num_block_encap;
	u64 num_block_mode;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node     node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32                    refcnt;
};

struct mlx5_host_work {
	struct work_struct	work;
	struct mlx5_eswitch	*esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb		nb;
	u16			num_vfs;
	u16			num_ec_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
	MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};

struct mlx5_esw_bridge_offloads;

enum {
	MLX5_ESW_FDB_CREATED = BIT(0),
};

struct dentry;

struct mlx5_eswitch {
	struct mlx5_core_dev    *dev;
	struct mlx5_nb          nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head       mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct dentry *debugfs_root;
	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;
	int                     total_vports;
	int                     enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex            state_lock;

	/* Protects eswitch mode changes occurring via one or more
	 * user commands, e.g. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	struct {
		u32             root_tsar_ix;
		struct mlx5_esw_rate_group *group0;
		struct list_head groups; /* Protected by esw->state_lock */

		/* Protected by esw->state_lock.
		 * Initially 0, meaning no QoS users and QoS is disabled.
		 */
		refcount_t refcnt;
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	int                     mode;
	u16                     manager_vport;
	u16                     first_host_vport;
	u8			num_peers;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32             large_group_num;
	}  params;
	struct blocking_notifier_head n_head;
	struct xarray paired;
	struct mlx5_devcom_comp_dev *devcom;
	u16 enabled_ipsec_vf_count;
	bool eswitch_operation_in_progress;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
int esw_offloads_init(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    struct mlx5_esw_rate_group *group,
				    struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP	= BIT(0),
	SET_VLAN_INSERT	= BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

/* Current maximum for flow-based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 32

enum {
	MLX5_ESW_DEST_ENCAP         = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID   = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE  = BIT(2),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev	*in_mdev;
	struct mlx5_core_dev    *counter_dev;
	struct mlx5e_tc_int_port *dest_int_port;
	struct mlx5e_tc_int_port *int_port;

	int split_count;
	int out_count;

	__be16	vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16	vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8	vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8	total_vlan;
	struct {
		u32 flags;
		bool vport_valid;
		u16 vport;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct ethhdr eth;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
				     u8 *hw_addr, int *hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
				     const u8 *hw_addr, int hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack);
#ifdef CONFIG_XFRM_OFFLOAD
int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
#endif /* CONFIG_XFRM_OFFLOAD */
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
{
	return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
		MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return  ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
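
/* Usage sketch (hypothetical, not part of this header): callers offloading
 * double-tagged (QinQ) traffic are expected to gate on depth-2 support:
 *
 *	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 2))
 *		return -EOPNOTSUPP;
 */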

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...)			\
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...)			\
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
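
/* Usage sketch (hypothetical message, not part of this header):
 *
 *	esw_warn(esw->dev, "vport(%d) failed to load\n", vport->vport);
 *
 * esw_debug() output is gated by MLX5_DEBUG_ESWITCH_MASK in the core debug
 * mask, while esw_info()/esw_warn() always go to the device log.
 */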

static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num,
				     u16 esw_owner_vhca_id)
{
	return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) ||
		(vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev));
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}
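
/* Round-trip sketch (hypothetical values): the devlink port index packs the
 * vhca_id in the high 16 bits and the vport number in the low 16 bits, so
 * vhca_id 2 and vport 5 encode to 0x20005, and
 * mlx5_esw_devlink_port_index_to_vport_num(0x20005) recovers 5.
 */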

static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
}

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* Each mark identifies an eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * a single mark.
 * MLX5_ESW_VPT_VF identifies an SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies an SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterators are valid only after the vports are initialized in
 * mlx5_eswitch_init. The idea is borrowed from xa_for_each_marked(), with
 * added support for a desired last element.
 */

#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter)	\
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter)	\
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)

/* This macro should only be used if EC SRIOV is enabled.
 *
 * Because no more marks were available on the xarray, this uses a
 * for_each_range approach. The range is only valid when EC SRIOV is
 * enabled.
 */
#define mlx5_esw_for_each_ec_vf_vport(esw, index, vport, last)		\
	xa_for_each_range(&((esw)->vports),				\
			  index,					\
			  vport,					\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base),	\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
			  (last) - 1)
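
/* Usage sketch (hypothetical loop, not part of this header): counting
 * enabled VF vports with the marked iterator:
 *
 *	struct mlx5_vport *vport;
 *	unsigned long i;
 *	int enabled = 0;
 *
 *	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
 *		if (vport->enabled)
 *			enabled++;
 */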

struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);

struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink);

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					 u32 *flow_group_in,
					 int match_params);

void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
				   u16 vport,
				   struct mlx5_flow_spec *spec);

int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				  struct mlx5_devlink_port *dl_port,
				  u32 controller, u32 sfnum);
void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
			       enum mlx5_eswitch_vport_event enabled_events,
			       struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum);
void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport);
void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport);

int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					   struct mlx5_devlink_port *dl_port,
					   u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);

/**
 * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);

bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
int mlx5_esw_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);

void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);

bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);

int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev);

static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->esw_funcs.num_vfs;

	return 0;
}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->num_peers;
	return 0;
}

static inline struct mlx5_flow_table *
mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.offloads.slow_fdb;
}

int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
				    struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev);
bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev);
int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev,
				  struct mlx5_vport *vport);
int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw);
void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw);

#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw, int max_slaves)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }

static inline int
mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	return 0;
}

static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
	return true;
}

static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}

static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {}
static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
{
	return false;
}

static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */