/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * include/linux/if_team.h - Network team device driver header
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 */
#ifndef _LINUX_IF_TEAM_H_
#define _LINUX_IF_TEAM_H_

#include <linux/netpoll.h>
#include <net/sch_generic.h>
#include <linux/types.h>
#include <uapi/linux/if_team.h>

struct team_pcpu_stats {
	u64_stats_t		rx_packets;
	u64_stats_t		rx_bytes;
	u64_stats_t		rx_multicast;
	u64_stats_t		tx_packets;
	u64_stats_t		tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_dropped;
	u32			tx_dropped;
	u32			rx_nohandler;
};
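
/*
 * The u64_stats_t counters above are written under syncp so that 32-bit
 * readers see consistent values. A minimal sketch of how an RX path might
 * bump them (illustrative only, not a verbatim copy of the driver code):
 *
 *	struct team_pcpu_stats *pcpu_stats;
 *
 *	pcpu_stats = this_cpu_ptr(team->pcpu_stats);
 *	u64_stats_update_begin(&pcpu_stats->syncp);
 *	u64_stats_inc(&pcpu_stats->rx_packets);
 *	u64_stats_add(&pcpu_stats->rx_bytes, skb->len);
 *	if (skb->pkt_type == PACKET_MULTICAST)
 *		u64_stats_inc(&pcpu_stats->rx_multicast);
 *	u64_stats_update_end(&pcpu_stats->syncp);
 *
 * The plain u32 drop counters are not covered by syncp and can be bumped
 * directly, e.g. with this_cpu_inc() on the per-cpu structure member.
 */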

struct team;

struct team_port {
	struct net_device *dev;
	struct hlist_node hlist; /* node in enabled ports hash list */
	struct list_head list; /* node in ordinary list */
	struct team *team;
	int index; /* index of enabled port. If disabled, it's set to -1 */

	bool linkup; /* either state.linkup or user.linkup */

	struct {
		bool linkup;
		u32 speed;
		u8 duplex;
	} state;

	/* Values set by userspace */
	struct {
		bool linkup;
		bool linkup_enabled;
	} user;

	/* Custom gennetlink interface related flags */
	bool changed;
	bool removed;

	/*
	 * A place for storing original values of the device before it
	 * became a port.
	 */
	struct {
		unsigned char dev_addr[MAX_ADDR_LEN];
		unsigned int mtu;
	} orig;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll *np;
#endif

	s32 priority; /* lower number ~ higher priority */
	u16 queue_id;
	struct list_head qom_list; /* node in queue override mapping list */
	struct rcu_head	rcu;
	long mode_priv[];
};

/*
 * A port netdevice's rx_handler_data points back to its struct team_port.
 * Callers must be in an RCU read-side critical section.
 */
static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/* A port is enabled iff it currently holds a valid enabled-port index. */
static inline bool team_port_enabled(struct team_port *port)
{
	return port->index != -1;
}

/* A port may transmit only when it is enabled and reports link up. */
static inline bool team_port_txable(struct team_port *port)
{
	return port->linkup && team_port_enabled(port);
}

/*
 * Same check as above for callers that only have the port netdevice;
 * returns false when no team_port is attached to it.
 */
static inline bool team_port_dev_txable(const struct net_device *port_dev)
{
	struct team_port *port;
	bool txable;

	rcu_read_lock();
	port = team_port_get_rcu(port_dev);
	txable = port ? team_port_txable(port) : false;
	rcu_read_unlock();

	return txable;
}
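
/*
 * Example (an illustrative sketch, not taken from any particular driver):
 * a LAG-aware caller holding only a netdevice pointer can combine the
 * generic netif_is_team_port() test with team_port_dev_txable() to decide
 * whether traffic hashed to that slave would actually go out on the wire:
 *
 *	static bool my_slave_can_tx(const struct net_device *slave_dev)
 *	{
 *		if (!netif_is_team_port(slave_dev))
 *			return false;
 *		return team_port_dev_txable(slave_dev);
 *	}
 */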

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
	netpoll_send_skb(port->np, skb);
}
#else
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
}
#endif

struct team_mode_ops {
	int (*init)(struct team *team);
	void (*exit)(struct team *team);
	rx_handler_result_t (*receive)(struct team *team,
				       struct team_port *port,
				       struct sk_buff *skb);
	bool (*transmit)(struct team *team, struct sk_buff *skb);
	int (*port_enter)(struct team *team, struct team_port *port);
	void (*port_leave)(struct team *team, struct team_port *port);
	void (*port_change_dev_addr)(struct team *team, struct team_port *port);
	void (*port_enabled)(struct team *team, struct team_port *port);
	void (*port_disabled)(struct team *team, struct team_port *port);
};
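
/*
 * A mode module fills one of these with callbacks; members it does not need
 * can be left unset. An illustrative sketch with hypothetical names (a
 * matching transmit sketch follows the port lookup helpers further below):
 *
 *	static rx_handler_result_t mymode_receive(struct team *team,
 *						  struct team_port *port,
 *						  struct sk_buff *skb)
 *	{
 *		return RX_HANDLER_ANOTHER;
 *	}
 *
 *	static const struct team_mode_ops mymode_ops = {
 *		.receive	= mymode_receive,
 *		.transmit	= mymode_transmit,
 *	};
 *
 * Returning RX_HANDLER_ANOTHER from .receive accepts the frame for the team
 * device; .transmit is expected to return true when the skb was handed off
 * and false when the team core should account a TX drop.
 */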

extern int team_modeop_port_enter(struct team *team, struct team_port *port);
extern void team_modeop_port_change_dev_addr(struct team *team,
					     struct team_port *port);

enum team_option_type {
	TEAM_OPTION_TYPE_U32,
	TEAM_OPTION_TYPE_STRING,
	TEAM_OPTION_TYPE_BINARY,
	TEAM_OPTION_TYPE_BOOL,
	TEAM_OPTION_TYPE_S32,
};

struct team_option_inst_info {
	u32 array_index;
	struct team_port *port; /* != NULL if per-port */
};

struct team_gsetter_ctx {
	union {
		u32 u32_val;
		const char *str_val;
		struct {
			const void *ptr;
			u32 len;
		} bin_val;
		bool bool_val;
		s32 s32_val;
	} data;
	struct team_option_inst_info *info;
};

struct team_option {
	struct list_head list;
	const char *name;
	bool per_port;
	unsigned int array_size; /* != 0 means the option is array */
	enum team_option_type type;
	void (*init)(struct team *team, struct team_option_inst_info *info);
	void (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
	int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};
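
/*
 * Options are exposed to userspace over the team genetlink interface. A
 * hedged sketch of how a mode might define a single boolean option; the
 * names and the mymode_priv structure are made up for illustration:
 *
 *	static void myopt_get(struct team *team, struct team_gsetter_ctx *ctx)
 *	{
 *		struct mymode_priv *priv = (struct mymode_priv *)team->mode_priv;
 *
 *		ctx->data.bool_val = priv->my_flag;
 *	}
 *
 *	static int myopt_set(struct team *team, struct team_gsetter_ctx *ctx)
 *	{
 *		struct mymode_priv *priv = (struct mymode_priv *)team->mode_priv;
 *
 *		priv->my_flag = ctx->data.bool_val;
 *		return 0;
 *	}
 *
 *	static const struct team_option mymode_options[] = {
 *		{
 *			.name	= "my_flag",
 *			.type	= TEAM_OPTION_TYPE_BOOL,
 *			.getter	= myopt_get,
 *			.setter	= myopt_set,
 *		},
 *	};
 *
 * Such an array is typically registered from the mode's init op with
 * team_options_register(team, mymode_options, ARRAY_SIZE(mymode_options)),
 * declared near the end of this header.
 */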

extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info);
extern void team_options_change_check(struct team *team);

struct team_mode {
	const char *kind;
	struct module *owner;
	size_t priv_size;
	size_t port_priv_size;
	const struct team_mode_ops *ops;
	enum netdev_lag_tx_type lag_tx_type;
};
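
/*
 * Each mode module describes itself with one of these. Continuing the
 * hypothetical mode sketched above, the descriptor could look like:
 *
 *	static const struct team_mode mymode_mode = {
 *		.kind		= "mymode",
 *		.owner		= THIS_MODULE,
 *		.priv_size	= sizeof(struct mymode_priv),
 *		.ops		= &mymode_ops,
 *		.lag_tx_type	= NETDEV_LAG_TX_TYPE_UNKNOWN,
 *	};
 *
 * priv_size and port_priv_size describe the per-team and per-port private
 * areas backing team->mode_priv and port->mode_priv; note that the per-team
 * area is a fixed array, so priv_size is expected to fit within
 * TEAM_MODE_PRIV_SIZE defined below.
 */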

#define TEAM_PORT_HASHBITS 4
#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)

#define TEAM_MODE_PRIV_LONGS 4
#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)

struct team {
	struct net_device *dev; /* associated netdevice */
	struct team_pcpu_stats __percpu *pcpu_stats;

	const struct header_ops *header_ops_cache;

	struct mutex lock; /* used for overall locking, e.g. port lists write */

	/*
	 * List of enabled ports and their count
	 */
	int en_port_count;
	struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES];

	struct list_head port_list; /* list of all ports */

	struct list_head option_list;
	struct list_head option_inst_list; /* list of option instances */

	const struct team_mode *mode;
	struct team_mode_ops ops;
	bool user_carrier_enabled;
	bool queue_override_enabled;
	struct list_head *qom_lists; /* array of queue override mapping lists */
	bool port_mtu_change_allowed;
	bool notifier_ctx;
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} notify_peers;
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} mcast_rejoin;
	struct lock_class_key team_lock_key;
	long mode_priv[TEAM_MODE_PRIV_LONGS];
};

/*
 * Hand an skb to a particular port device. The queue mapping saved by the
 * team qdisc layer is restored first so the port picks the intended TX
 * queue; when netpoll is active on the team device, the frame goes out via
 * the port's netpoll channel instead of the regular xmit path.
 */
static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
				      struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	skb->dev = port->dev;
	if (unlikely(netpoll_tx_running(team->dev))) {
		team_netpoll_send_skb(port, skb);
		return 0;
	}
	return dev_queue_xmit(skb);
}
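
/*
 * Note for callers: dev_queue_xmit() always consumes the skb, so on a
 * non-zero return the caller must not free the skb again; a mode's
 * transmit op typically just reports the failure and lets the team core
 * account the drop, e.g. (sketch):
 *
 *	if (team_dev_queue_xmit(team, port, skb))
 *		return false;
 *	return true;
 */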

static inline struct hlist_head *team_port_index_hash(struct team *team,
						      int port_index)
{
	return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
}

static inline struct team_port *team_get_port_by_index(struct team *team,
						       int port_index)
{
	struct team_port *port;
	struct hlist_head *head = team_port_index_hash(team, port_index);

	hlist_for_each_entry(port, head, hlist)
		if (port->index == port_index)
			return port;
	return NULL;
}

/*
 * Map an arbitrary number (e.g. a rolling counter or a packet hash) onto
 * the range of currently enabled ports; returns 0 when no port is enabled.
 */
static inline int team_num_to_port_index(struct team *team, unsigned int num)
{
	int en_port_count = READ_ONCE(team->en_port_count);

	if (unlikely(!en_port_count))
		return 0;
	return num % en_port_count;
}

static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
							   int port_index)
{
	struct team_port *port;
	struct hlist_head *head = team_port_index_hash(team, port_index);

	hlist_for_each_entry_rcu(port, head, hlist)
		if (port->index == port_index)
			return port;
	return NULL;
}

/*
 * Starting at @port, find the first port that can transmit, wrapping around
 * the port list if needed; returns NULL when no port is txable.
 */
static inline struct team_port *
team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
{
	struct team_port *cur;

	if (likely(team_port_txable(port)))
		return port;
	cur = port;
	list_for_each_entry_continue_rcu(cur, &team->port_list, list)
		if (team_port_txable(cur))
			return cur;
	list_for_each_entry_rcu(cur, &team->port_list, list) {
		if (cur == port)
			break;
		if (team_port_txable(cur))
			return cur;
	}
	return NULL;
}
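
/*
 * The helpers above combine naturally in a transmit path. A hedged sketch
 * of a hash-distributed transmit op for the hypothetical mode used in the
 * earlier examples (the real mode implementations live under
 * drivers/net/team/):
 *
 *	static bool mymode_transmit(struct team *team, struct sk_buff *skb)
 *	{
 *		struct team_port *port;
 *		int port_index;
 *
 *		port_index = team_num_to_port_index(team, skb_get_hash(skb));
 *		port = team_get_port_by_index_rcu(team, port_index);
 *		if (unlikely(!port))
 *			goto drop;
 *		port = team_get_first_port_txable_rcu(team, port);
 *		if (unlikely(!port))
 *			goto drop;
 *		if (team_dev_queue_xmit(team, port, skb))
 *			return false;
 *		return true;
 *
 *	drop:
 *		dev_kfree_skb_any(skb);
 *		return false;
 *	}
 */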

extern int team_options_register(struct team *team,
				 const struct team_option *option,
				 size_t option_count);
extern void team_options_unregister(struct team *team,
				    const struct team_option *option,
				    size_t option_count);
extern int team_mode_register(const struct team_mode *mode);
extern void team_mode_unregister(const struct team_mode *mode);

#define TEAM_DEFAULT_NUM_TX_QUEUES 16
#define TEAM_DEFAULT_NUM_RX_QUEUES 16

#define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind)
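
/*
 * A mode module ties everything together in its init/exit hooks and
 * advertises itself so the team core can request_module() it by kind.
 * A sketch for the hypothetical mode used throughout these examples:
 *
 *	static int __init mymode_init_module(void)
 *	{
 *		return team_mode_register(&mymode_mode);
 *	}
 *	module_init(mymode_init_module);
 *
 *	static void __exit mymode_cleanup_module(void)
 *	{
 *		team_mode_unregister(&mymode_mode);
 *	}
 *	module_exit(mymode_cleanup_module);
 *
 *	MODULE_LICENSE("GPL");
 *	MODULE_ALIAS_TEAM_MODE("mymode");
 */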

#endif /* _LINUX_IF_TEAM_H_ */