/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_NETBACK__COMMON_H__
#define __XEN_NETBACK__COMMON_H__

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/grant_table.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <linux/debugfs.h>

typedef unsigned int pending_ring_idx_t;

struct pending_tx_info {
	struct xen_netif_tx_request req; /* tx request */
	unsigned int extra_count;
	/* Callback data for released SKBs. The callback is always
	 * xenvif_zerocopy_callback, desc contains the pending_idx, which is
	 * also an index into the pending_tx_info array. It is initialized in
	 * xenvif_alloc and never changes.
	 * skb_shinfo(skb)->destructor_arg points to the first mapped slot's
	 * callback_struct in this array of struct pending_tx_info's, then ctx
	 * to the next, or NULL if there are no more slots for this skb.
	 * ubuf_to_vif is a helper which finds the struct xenvif from a pointer
	 * to this field.
	 */
	struct ubuf_info_msgzc callback_struct;
};
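
/* An illustrative sketch (not part of the driver API) of how the per-skb
 * chain described above could be walked, assuming the desc/ctx layout of
 * struct ubuf_info_msgzc:
 *
 *	struct ubuf_info_msgzc *ubuf =
 *		uarg_to_msgzc(skb_shinfo(skb)->destructor_arg);
 *
 *	while (ubuf) {
 *		u16 pending_idx = ubuf->desc;	// slot in pending_tx_info[]
 *		// ... inspect pending_tx_info[pending_idx] ...
 *		ubuf = (struct ubuf_info_msgzc *)ubuf->ctx;
 *	}
 */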

#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

struct xenvif_rx_meta {
	int id;
	int size;
	int gso_type;
	int gso_size;
};

#define GSO_BIT(type) \
	(1 << XEN_NETIF_GSO_TYPE_ ## type)

/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE

/* The maximum number of frags is derived from the size of a grant
 * (currently the same size as a Xen page).
 */
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
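/* For example, with XEN_PAGE_SIZE == 4096 this works out to
 * 65536 / 4096 + 1 = 17 frags.
 */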

#define NETBACK_INVALID_HANDLE -1

/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
 * the maximum slots a valid packet can use. Currently this value is
 * defined to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be
 * supported by all backends.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
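
/* For instance, a vif named "vif1.0" would get queue names such as
 * "vif1.0-q0" and, with split event channels, IRQ names such as
 * "vif1.0-q0-tx" and "vif1.0-q0-rx" (illustrative only; the exact
 * formatting is done where the queue and its IRQs are set up).
 */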

struct xenvif;

struct xenvif_stats {
	/* Stats fields to be updated per-queue.
	 * A subset of struct net_device_stats that contains only the
	 * fields that are updated in netback.c for each queue.
	 */
	u64 rx_bytes;
	u64 rx_packets;
	u64 tx_bytes;
	u64 tx_packets;

	/* Additional stats used by xenvif */
	unsigned long rx_gso_checksum_fixup;
	unsigned long tx_zerocopy_sent;
	unsigned long tx_zerocopy_success;
	unsigned long tx_zerocopy_fail;
	unsigned long tx_frag_overflow;
};

#define COPY_BATCH_SIZE 64

struct xenvif_copy_state {
	struct gnttab_copy op[COPY_BATCH_SIZE];
	RING_IDX idx[COPY_BATCH_SIZE];
	unsigned int num;
	struct sk_buff_head *completed;
};
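
/* An illustrative sketch (assumed logic, not the driver's exact code) of
 * how a batch like this could be flushed once op[] is full:
 *
 *	if (queue->rx_copy.num == COPY_BATCH_SIZE) {
 *		gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
 *		// ... check each op[i].status, complete the queued skbs ...
 *		queue->rx_copy.num = 0;
 *	}
 */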

struct xenvif_queue { /* Per-queue data for xenvif */
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct xenvif *vif; /* Parent VIF */

	/*
	 * TX/RX common EOI handling.
	 * When feature-split-event-channels = 0, the interrupt handler sets
	 * NETBK_COMMON_EOI; otherwise NETBK_RX_EOI and NETBK_TX_EOI are set
	 * by the RX and TX interrupt handlers respectively.
	 * The RX and TX handler threads will issue an EOI when either
	 * NETBK_COMMON_EOI or their specific bit (NETBK_RX_EOI or
	 * NETBK_TX_EOI) is set, and they will then reset those bits.
	 * See the illustrative sketch after this structure.
	 */
	atomic_t eoi_pending;
#define NETBK_RX_EOI		0x01
#define NETBK_TX_EOI		0x02
#define NETBK_COMMON_EOI	0x04

	/* Use NAPI for guest TX */
	struct napi_struct napi;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int tx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	struct xen_netif_tx_back_ring tx;
	struct sk_buff_head tx_queue;
	struct page *mmap_pages[MAX_PENDING_REQS];
	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	u16 pending_ring[MAX_PENDING_REQS];
	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];

	struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS];
	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
	struct page *pages_to_map[MAX_PENDING_REQS];
	struct page *pages_to_unmap[MAX_PENDING_REQS];

	/* This prevents zerocopy callbacks from racing over the dealloc_ring */
	spinlock_t callback_lock;
	/* This prevents the dealloc thread and the NAPI instance from racing
	 * over response creation and pending_ring in xenvif_idx_release. In
	 * xenvif_tx_err it only protects response creation.
	 */
	spinlock_t response_lock;
	pending_ring_idx_t dealloc_prod;
	pending_ring_idx_t dealloc_cons;
	u16 dealloc_ring[MAX_PENDING_REQS];
	struct task_struct *dealloc_task;
	wait_queue_head_t dealloc_wq;
	atomic_t inflight_packets;

	/* Use kthread for guest RX */
	struct task_struct *task;
	wait_queue_head_t wq;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int rx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
	struct xen_netif_rx_back_ring rx;
	struct sk_buff_head rx_queue;

	unsigned int rx_queue_max;
	unsigned int rx_queue_len;
	unsigned long last_rx_time;
	unsigned int rx_slots_needed;
	bool stalled;

	struct xenvif_copy_state rx_copy;

	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
	unsigned long   credit_bytes;
	unsigned long   credit_usec;
	unsigned long   remaining_credit;
	struct timer_list credit_timeout;
	u64 credit_window_start;
	bool rate_limited;

	/* Statistics */
	struct xenvif_stats stats;
};
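
/* An illustrative sketch (assumed flow, not the driver's exact code) of the
 * EOI bookkeeping described in struct xenvif_queue: a split-channel RX
 * interrupt handler sets its bit, and the consumer clears it before issuing
 * a late EOI:
 *
 *	// hard interrupt context
 *	atomic_or(NETBK_RX_EOI, &queue->eoi_pending);
 *
 *	// once the RX work for this event has been handled
 *	if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
 *				&queue->eoi_pending) &
 *	    (NETBK_RX_EOI | NETBK_COMMON_EOI))
 *		xen_irq_lateeoi(queue->rx_irq, 0);
 */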

enum state_bit_shift {
	/* This bit marks that the vif is connected */
	VIF_STATUS_CONNECTED,
};

struct xenvif_mcast_addr {
	struct list_head entry;
	struct rcu_head rcu;
	u8 addr[6];
};

#define XEN_NETBK_MCAST_MAX 64

#define XEN_NETBK_MAX_HASH_KEY_SIZE 40
#define XEN_NETBK_MAX_HASH_MAPPING_SIZE 128
#define XEN_NETBK_HASH_TAG_SIZE 40

struct xenvif_hash_cache_entry {
	struct list_head link;
	struct rcu_head rcu;
	u8 tag[XEN_NETBK_HASH_TAG_SIZE];
	unsigned int len;
	u32 val;
	int seq;
};

struct xenvif_hash_cache {
	spinlock_t lock;
	struct list_head list;
	unsigned int count;
	atomic_t seq;
};

struct xenvif_hash {
	unsigned int alg;
	u32 flags;
	bool mapping_sel;
	u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
	u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
	unsigned int size;
	struct xenvif_hash_cache cache;
};

struct backend_info {
	struct xenbus_device *dev;
	struct xenvif *vif;

	/* This is the state that will be reflected in xenstore when any
	 * active hotplug script completes.
	 */
	enum xenbus_state state;

	enum xenbus_state frontend_state;
	struct xenbus_watch hotplug_status_watch;
	u8 have_hotplug_status_watch:1;

	const char *hotplug_script;
};

struct xenvif {
	/* Unique identifier for this interface. */
	domid_t          domid;
	unsigned int     handle;

	u8               fe_dev_addr[6];
	struct list_head fe_mcast_addr;
	unsigned int     fe_mcast_count;

	/* Frontend feature information. */
	int gso_mask;

	u8 can_sg:1;
	u8 ip_csum:1;
	u8 ipv6_csum:1;
	u8 multicast_control:1;

	/* headroom requested by xen-netfront */
	u16 xdp_headroom;

	/* Is this interface disabled? True when the backend discovers
	 * the frontend is rogue.
	 */
	bool disabled;
	unsigned long status;
	unsigned long drain_timeout;
	unsigned long stall_timeout;

	/* Queues */
	struct xenvif_queue *queues;
	unsigned int num_queues; /* active queues, resource allocated */
	unsigned int stalled_queues;

	struct xenvif_hash hash;

	struct xenbus_watch credit_watch;
	struct xenbus_watch mcast_ctrl_watch;

	struct backend_info *be;

	spinlock_t lock;

#ifdef CONFIG_DEBUG_FS
	struct dentry *xenvif_dbg_root;
#endif

	struct xen_netif_ctrl_back_ring ctrl;
	unsigned int ctrl_irq;

	/* Miscellaneous private stuff. */
	struct net_device *dev;
};

struct xenvif_rx_cb {
	unsigned long expires;
	int meta_slots_used;
};

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)

static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
{
	return to_xenbus_device(vif->dev->dev.parent);
}

void xenvif_tx_credit_callback(struct timer_list *t);

struct xenvif *xenvif_alloc(struct device *parent,
			    domid_t domid,
			    unsigned int handle);

int xenvif_init_queue(struct xenvif_queue *queue);
void xenvif_deinit_queue(struct xenvif_queue *queue);

int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn);
void xenvif_disconnect_data(struct xenvif *vif);
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn);
void xenvif_disconnect_ctrl(struct xenvif *vif);
void xenvif_free(struct xenvif *vif);

int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);

/* (Un)Map communication rings. */
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref);

/* Check for SKBs from frontend and schedule backend processing */
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);

/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);

int xenvif_tx_action(struct xenvif_queue *queue, int budget);

int xenvif_kthread_guest_rx(void *data);
void xenvif_kick_thread(struct xenvif_queue *queue);

int xenvif_dealloc_kthread(void *data);

irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);

bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);

void xenvif_carrier_on(struct xenvif *vif);

/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
			      bool zerocopy_success);

/* Number of TX requests currently in flight: pending_prod - pending_cons
 * is the number of free slots in the pending ring, so subtracting it from
 * MAX_PENDING_REQS gives the slots that are still in use.
 */
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
	return MAX_PENDING_REQS -
		queue->pending_prod + queue->pending_cons;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id);

extern bool separate_tx_rx_irq;
extern bool provides_xdp_headroom;

extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;
extern unsigned int xenvif_hash_cache_size;

#ifdef CONFIG_DEBUG_FS
extern struct dentry *xen_netback_dbg_root;
#endif

void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb);
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);

/* Multicast control */
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
void xenvif_mcast_addr_list_free(struct xenvif *vif);

/* Hash */
void xenvif_init_hash(struct xenvif *vif);
void xenvif_deinit_hash(struct xenvif *vif);

u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg);
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags);
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags);
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len);
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size);
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
			    u32 off);

void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);

#ifdef CONFIG_DEBUG_FS
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
#endif

#endif /* __XEN_NETBACK__COMMON_H__ */