io_uring.c revision b97e736a
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
7 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqe (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
29 *
30 * Also see the examples in the liburing library:
31 *
32 *	git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes and to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
40 * Copyright (c) 2018-2019 Christoph Hellwig
41 */
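
/*
 * Illustrative userspace sketch of the CQ-side pairing described above; it
 * is not part of this file. It assumes the application has already mmap'ed
 * the CQ ring and resolved khead/ktail/ring_mask/cqes from the published
 * io_cqring_offsets (liburing-style); reap_cqes() and handle_cqe() are
 * made-up names for the example.
 *
 *	unsigned reap_cqes(unsigned *khead, const unsigned *ktail,
 *			   unsigned ring_mask, const struct io_uring_cqe *cqes)
 *	{
 *		unsigned head = *khead;
 *		unsigned seen = 0;
 *
 *		// acquire pairs with the kernel's store-release of cq.tail in
 *		// io_commit_cqring(); CQE loads can't be hoisted above it
 *		while (head != smp_load_acquire(ktail)) {
 *			handle_cqe(&cqes[head & ring_mask]);
 *			head++;
 *			seen++;
 *		}
 *		// release pairs with the kernel's head read in io_get_cqe();
 *		// it orders the CQE loads before the new head is visible
 *		smp_store_release(khead, head);
 *		return seen;
 *	}
 */
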
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/errno.h>
45#include <linux/syscalls.h>
46#include <linux/compat.h>
47#include <net/compat.h>
48#include <linux/refcount.h>
49#include <linux/uio.h>
50#include <linux/bits.h>
51
52#include <linux/sched/signal.h>
53#include <linux/fs.h>
54#include <linux/file.h>
55#include <linux/fdtable.h>
56#include <linux/mm.h>
57#include <linux/mman.h>
58#include <linux/percpu.h>
59#include <linux/slab.h>
60#include <linux/blkdev.h>
61#include <linux/bvec.h>
62#include <linux/net.h>
63#include <net/sock.h>
64#include <net/af_unix.h>
65#include <net/scm.h>
66#include <linux/anon_inodes.h>
67#include <linux/sched/mm.h>
68#include <linux/uaccess.h>
69#include <linux/nospec.h>
70#include <linux/sizes.h>
71#include <linux/hugetlb.h>
72#include <linux/highmem.h>
73#include <linux/namei.h>
74#include <linux/fsnotify.h>
75#include <linux/fadvise.h>
76#include <linux/eventpoll.h>
77#include <linux/splice.h>
78#include <linux/task_work.h>
79#include <linux/pagemap.h>
80#include <linux/io_uring.h>
81#include <linux/tracehook.h>
82
83#define CREATE_TRACE_POINTS
84#include <trace/events/io_uring.h>
85
86#include <uapi/linux/io_uring.h>
87
88#include "internal.h"
89#include "io-wq.h"
90
91#define IORING_MAX_ENTRIES	32768
92#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
93#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
94
95/* 512 entries per page on 64-bit archs, 64 pages max */
96#define IORING_MAX_FIXED_FILES	(1U << 15)
97#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
98				 IORING_REGISTER_LAST + IORING_OP_LAST)
99
100#define IO_RSRC_TAG_TABLE_SHIFT	9
101#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
102#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)
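
/*
 * The tags attached to registered files/buffers live in a two-level table
 * indexed with the constants above: 512 u64 tags per second-level chunk
 * (one page on most configs), so an index splits into a chunk number and a
 * slot within it. A sketch of the lookup, purely for illustration, with
 * "tags" standing for the u64 ** in struct io_rsrc_data below:
 *
 *	u64 *slot = &tags[idx >> IO_RSRC_TAG_TABLE_SHIFT]
 *			 [idx &  IO_RSRC_TAG_TABLE_MASK];
 */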
103
104#define IORING_MAX_REG_BUFFERS	(1U << 14)
105
106#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE | IOSQE_IO_DRAIN | IOSQE_IO_LINK | \
107				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
108				IOSQE_BUFFER_SELECT)
109#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
110				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS)
111
112#define IO_TCTX_REFS_CACHE_NR	(1U << 10)
113
114struct io_uring {
115	u32 head ____cacheline_aligned_in_smp;
116	u32 tail ____cacheline_aligned_in_smp;
117};
118
119/*
120 * This data is shared with the application through the mmap at offsets
121 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
122 *
123 * The offsets to the member fields are published through struct
124 * io_sqring_offsets when calling io_uring_setup.
125 */
126struct io_rings {
127	/*
128	 * Head and tail offsets into the ring; the offsets need to be
129	 * masked to get valid indices.
130	 *
131	 * The kernel controls head of the sq ring and the tail of the cq ring,
132	 * and the application controls tail of the sq ring and the head of the
133	 * cq ring.
134	 */
135	struct io_uring		sq, cq;
136	/*
137	 * Bitmasks to apply to head and tail offsets (constant, equals
138	 * ring_entries - 1)
139	 */
140	u32			sq_ring_mask, cq_ring_mask;
141	/* Ring sizes (constant, power of 2) */
142	u32			sq_ring_entries, cq_ring_entries;
143	/*
144	 * Number of invalid entries dropped by the kernel due to
145 * an invalid index stored in the array
146	 *
147	 * Written by the kernel, shouldn't be modified by the
148	 * application (i.e. get number of "new events" by comparing to
149	 * cached value).
150	 *
151	 * After a new SQ head value was read by the application this
152	 * counter includes all submissions that were dropped reaching
153	 * the new SQ head (and possibly more).
154	 */
155	u32			sq_dropped;
156	/*
157	 * Runtime SQ flags
158	 *
159	 * Written by the kernel, shouldn't be modified by the
160	 * application.
161	 *
162	 * The application needs a full memory barrier before checking
163	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
164	 */
165	u32			sq_flags;
166	/*
167	 * Runtime CQ flags
168	 *
169	 * Written by the application, shouldn't be modified by the
170	 * kernel.
171	 */
172	u32			cq_flags;
173	/*
174	 * Number of completion events lost because the queue was full;
175	 * this should be avoided by the application by making sure
176	 * there are not more requests pending than there is space in
177	 * the completion queue.
178	 *
179	 * Written by the kernel, shouldn't be modified by the
180	 * application (i.e. get number of "new events" by comparing to
181	 * cached value).
182	 *
183	 * As completion events come in out of order this counter is not
184	 * ordered with any other data.
185	 */
186	u32			cq_overflow;
187	/*
188	 * Ring buffer of completion events.
189	 *
190	 * The kernel writes completion events fresh every time they are
191	 * produced, so the application is allowed to modify pending
192	 * entries.
193	 */
194	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
195};
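
/*
 * Illustrative userspace sketch of how the two rings described by this
 * struct are mapped; it is not part of this file. It follows what
 * io_uring_setup(2) documents and what liburing does; error handling is
 * omitted and the raw syscall is used since libc has no wrapper.
 *
 *	struct io_uring_params p = { };
 *	int fd = syscall(__NR_io_uring_setup, 256, &p);
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *
 *	void *sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
 *	void *cq_ring = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
 *	struct io_uring_sqe *sqes = mmap(NULL, p.sq_entries * sizeof(*sqes),
 *					 PROT_READ | PROT_WRITE,
 *					 MAP_SHARED | MAP_POPULATE, fd,
 *					 IORING_OFF_SQES);
 *
 *	unsigned *sq_tail = (void *)((char *)sq_ring + p.sq_off.tail);
 *	unsigned *sq_array = (void *)((char *)sq_ring + p.sq_off.array);
 *	struct io_uring_cqe *cqes = (void *)((char *)cq_ring + p.cq_off.cqes);
 */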
196
197enum io_uring_cmd_flags {
198	IO_URING_F_NONBLOCK		= 1,
199	IO_URING_F_COMPLETE_DEFER	= 2,
200};
201
202struct io_mapped_ubuf {
203	u64		ubuf;
204	u64		ubuf_end;
205	unsigned int	nr_bvecs;
206	unsigned long	acct_pages;
207	struct bio_vec	bvec[];
208};
209
210struct io_ring_ctx;
211
212struct io_overflow_cqe {
213	struct io_uring_cqe cqe;
214	struct list_head list;
215};
216
217struct io_fixed_file {
218	/* file * with additional FFS_* flags */
219	unsigned long file_ptr;
220};
221
222struct io_rsrc_put {
223	struct list_head list;
224	u64 tag;
225	union {
226		void *rsrc;
227		struct file *file;
228		struct io_mapped_ubuf *buf;
229	};
230};
231
232struct io_file_table {
233	struct io_fixed_file *files;
234};
235
236struct io_rsrc_node {
237	struct percpu_ref		refs;
238	struct list_head		node;
239	struct list_head		rsrc_list;
240	struct io_rsrc_data		*rsrc_data;
241	struct llist_node		llist;
242	bool				done;
243};
244
245typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
246
247struct io_rsrc_data {
248	struct io_ring_ctx		*ctx;
249
250	u64				**tags;
251	unsigned int			nr;
252	rsrc_put_fn			*do_put;
253	atomic_t			refs;
254	struct completion		done;
255	bool				quiesce;
256};
257
258struct io_buffer {
259	struct list_head list;
260	__u64 addr;
261	__u32 len;
262	__u16 bid;
263};
264
265struct io_restriction {
266	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
267	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
268	u8 sqe_flags_allowed;
269	u8 sqe_flags_required;
270	bool registered;
271};
272
273enum {
274	IO_SQ_THREAD_SHOULD_STOP = 0,
275	IO_SQ_THREAD_SHOULD_PARK,
276};
277
278struct io_sq_data {
279	refcount_t		refs;
280	atomic_t		park_pending;
281	struct mutex		lock;
282
283	/* ctx's that are using this sqd */
284	struct list_head	ctx_list;
285
286	struct task_struct	*thread;
287	struct wait_queue_head	wait;
288
289	unsigned		sq_thread_idle;
290	int			sq_cpu;
291	pid_t			task_pid;
292	pid_t			task_tgid;
293
294	unsigned long		state;
295	struct completion	exited;
296};
297
298#define IO_COMPL_BATCH			32
299#define IO_REQ_CACHE_SIZE		32
300#define IO_REQ_ALLOC_BATCH		8
301
302struct io_submit_link {
303	struct io_kiocb		*head;
304	struct io_kiocb		*last;
305};
306
307struct io_submit_state {
308	struct blk_plug		plug;
309	struct io_submit_link	link;
310
311	/*
312	 * io_kiocb alloc cache
313	 */
314	void			*reqs[IO_REQ_CACHE_SIZE];
315	unsigned int		free_reqs;
316
317	bool			plug_started;
318
319	/*
320	 * Batch completion logic
321	 */
322	struct io_kiocb		*compl_reqs[IO_COMPL_BATCH];
323	unsigned int		compl_nr;
324	/* inline/task_work completion list, under ->uring_lock */
325	struct list_head	free_list;
326
327	unsigned int		ios_left;
328};
329
330struct io_ring_ctx {
331	/* const or read-mostly hot data */
332	struct {
333		struct percpu_ref	refs;
334
335		struct io_rings		*rings;
336		unsigned int		flags;
337		unsigned int		compat: 1;
338		unsigned int		drain_next: 1;
339		unsigned int		eventfd_async: 1;
340		unsigned int		restricted: 1;
341		unsigned int		off_timeout_used: 1;
342		unsigned int		drain_active: 1;
343	} ____cacheline_aligned_in_smp;
344
345	/* submission data */
346	struct {
347		struct mutex		uring_lock;
348
349		/*
350		 * Ring buffer of indices into array of io_uring_sqe, which is
351		 * mmapped by the application using the IORING_OFF_SQES offset.
352		 *
353		 * This indirection could e.g. be used to assign fixed
354		 * io_uring_sqe entries to operations and only submit them to
355		 * the queue when needed.
356		 *
357		 * The kernel modifies neither the indices array nor the entries
358		 * array.
359		 */
360		u32			*sq_array;
361		struct io_uring_sqe	*sq_sqes;
362		unsigned		cached_sq_head;
363		unsigned		sq_entries;
364		struct list_head	defer_list;
365
366		/*
367		 * Fixed resources fast path, should be accessed only under
368		 * uring_lock, and updated through io_uring_register(2)
369		 */
370		struct io_rsrc_node	*rsrc_node;
371		struct io_file_table	file_table;
372		unsigned		nr_user_files;
373		unsigned		nr_user_bufs;
374		struct io_mapped_ubuf	**user_bufs;
375
376		struct io_submit_state	submit_state;
377		struct list_head	timeout_list;
378		struct list_head	cq_overflow_list;
379		struct xarray		io_buffers;
380		struct xarray		personalities;
381		u32			pers_next;
382		unsigned		sq_thread_idle;
383	} ____cacheline_aligned_in_smp;
384
385	/* IRQ completion list, under ->completion_lock */
386	struct list_head	locked_free_list;
387	unsigned int		locked_free_nr;
388
389	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
390	struct io_sq_data	*sq_data;	/* if using sq thread polling */
391
392	struct wait_queue_head	sqo_sq_wait;
393	struct list_head	sqd_list;
394
395	unsigned long		check_cq_overflow;
396
397	struct {
398		unsigned		cached_cq_tail;
399		unsigned		cq_entries;
400		struct eventfd_ctx	*cq_ev_fd;
401		struct wait_queue_head	poll_wait;
402		struct wait_queue_head	cq_wait;
403		unsigned		cq_extra;
404		atomic_t		cq_timeouts;
405		struct fasync_struct	*cq_fasync;
406		unsigned		cq_last_tm_flush;
407	} ____cacheline_aligned_in_smp;
408
409	struct {
410		spinlock_t		completion_lock;
411
412		spinlock_t		timeout_lock;
413
414		/*
415		 * ->iopoll_list is protected by the ctx->uring_lock for
416		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
417		 * For SQPOLL, only the single threaded io_sq_thread() will
418		 * manipulate the list, hence no extra locking is needed there.
419		 */
420		struct list_head	iopoll_list;
421		struct hlist_head	*cancel_hash;
422		unsigned		cancel_hash_bits;
423		bool			poll_multi_queue;
424	} ____cacheline_aligned_in_smp;
425
426	struct io_restriction		restrictions;
427
428	/* slow path rsrc auxiliary data, used by update/register */
429	struct {
430		struct io_rsrc_node		*rsrc_backup_node;
431		struct io_mapped_ubuf		*dummy_ubuf;
432		struct io_rsrc_data		*file_data;
433		struct io_rsrc_data		*buf_data;
434
435		struct delayed_work		rsrc_put_work;
436		struct llist_head		rsrc_put_llist;
437		struct list_head		rsrc_ref_list;
438		spinlock_t			rsrc_ref_lock;
439	};
440
441	/* Keep this last, we don't need it for the fast path */
442	struct {
443		#if defined(CONFIG_UNIX)
444			struct socket		*ring_sock;
445		#endif
446		/* hashed buffered write serialization */
447		struct io_wq_hash		*hash_map;
448
449		/* Only used for accounting purposes */
450		struct user_struct		*user;
451		struct mm_struct		*mm_account;
452
453		/* ctx exit and cancelation */
454		struct llist_head		fallback_llist;
455		struct delayed_work		fallback_work;
456		struct work_struct		exit_work;
457		struct list_head		tctx_list;
458		struct completion		ref_comp;
459	};
460};
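
/*
 * Illustrative userspace sketch of the SQ-side protocol, including the
 * sq_array indirection documented in struct io_ring_ctx above and the
 * IORING_SQ_NEED_WAKEUP check needed for IORING_SETUP_SQPOLL; it is not
 * part of this file. Pointer setup is assumed as in the mmap sketch after
 * struct io_rings; submit_one() and prep_sqe() are made-up names.
 *
 *	void submit_one(struct io_uring_sqe *sqes, unsigned *sq_array,
 *			unsigned *ktail, unsigned ring_mask,
 *			const unsigned *kflags, int ring_fd)
 *	{
 *		unsigned tail = *ktail;
 *		unsigned idx = tail & ring_mask;
 *
 *		// fill an SQE slot, then publish its index; the indirection
 *		// lets fixed SQE slots be submitted in any order
 *		prep_sqe(&sqes[idx]);
 *		sq_array[idx] = idx;
 *
 *		// release pairs with the kernel's acquire load of sq.tail; it
 *		// orders the SQE and sq_array stores before the new tail
 *		smp_store_release(ktail, tail + 1);
 *
 *		// SQPOLL only: full barrier between the tail update and the
 *		// NEED_WAKEUP check, per the header comment at the top of
 *		// this file
 *		smp_mb();
 *		if (READ_ONCE(*kflags) & IORING_SQ_NEED_WAKEUP)
 *			syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *				IORING_ENTER_SQ_WAKEUP, NULL, 0);
 *	}
 */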
461
462struct io_uring_task {
463	/* submission side */
464	int			cached_refs;
465	struct xarray		xa;
466	struct wait_queue_head	wait;
467	const struct io_ring_ctx *last;
468	struct io_wq		*io_wq;
469	struct percpu_counter	inflight;
470	atomic_t		inflight_tracked;
471	atomic_t		in_idle;
472
473	spinlock_t		task_lock;
474	struct io_wq_work_list	task_list;
475	struct callback_head	task_work;
476	bool			task_running;
477};
478
479/*
480 * First field must be the file pointer in all the
481 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
482 */
483struct io_poll_iocb {
484	struct file			*file;
485	struct wait_queue_head		*head;
486	__poll_t			events;
487	bool				done;
488	bool				canceled;
489	struct wait_queue_entry		wait;
490};
491
492struct io_poll_update {
493	struct file			*file;
494	u64				old_user_data;
495	u64				new_user_data;
496	__poll_t			events;
497	bool				update_events;
498	bool				update_user_data;
499};
500
501struct io_close {
502	struct file			*file;
503	int				fd;
504};
505
506struct io_timeout_data {
507	struct io_kiocb			*req;
508	struct hrtimer			timer;
509	struct timespec64		ts;
510	enum hrtimer_mode		mode;
511};
512
513struct io_accept {
514	struct file			*file;
515	struct sockaddr __user		*addr;
516	int __user			*addr_len;
517	int				flags;
518	unsigned long			nofile;
519};
520
521struct io_sync {
522	struct file			*file;
523	loff_t				len;
524	loff_t				off;
525	int				flags;
526	int				mode;
527};
528
529struct io_cancel {
530	struct file			*file;
531	u64				addr;
532};
533
534struct io_timeout {
535	struct file			*file;
536	u32				off;
537	u32				target_seq;
538	struct list_head		list;
539	/* head of the link, used by linked timeouts only */
540	struct io_kiocb			*head;
541	/* for linked completions */
542	struct io_kiocb			*prev;
543};
544
545struct io_timeout_rem {
546	struct file			*file;
547	u64				addr;
548
549	/* timeout update */
550	struct timespec64		ts;
551	u32				flags;
552};
553
554struct io_rw {
555	/* NOTE: kiocb has the file as the first member, so don't do it here */
556	struct kiocb			kiocb;
557	u64				addr;
558	u64				len;
559};
560
561struct io_connect {
562	struct file			*file;
563	struct sockaddr __user		*addr;
564	int				addr_len;
565};
566
567struct io_sr_msg {
568	struct file			*file;
569	union {
570		struct compat_msghdr __user	*umsg_compat;
571		struct user_msghdr __user	*umsg;
572		void __user			*buf;
573	};
574	int				msg_flags;
575	int				bgid;
576	size_t				len;
577	struct io_buffer		*kbuf;
578};
579
580struct io_open {
581	struct file			*file;
582	int				dfd;
583	struct filename			*filename;
584	struct open_how			how;
585	unsigned long			nofile;
586};
587
588struct io_rsrc_update {
589	struct file			*file;
590	u64				arg;
591	u32				nr_args;
592	u32				offset;
593};
594
595struct io_fadvise {
596	struct file			*file;
597	u64				offset;
598	u32				len;
599	u32				advice;
600};
601
602struct io_madvise {
603	struct file			*file;
604	u64				addr;
605	u32				len;
606	u32				advice;
607};
608
609struct io_epoll {
610	struct file			*file;
611	int				epfd;
612	int				op;
613	int				fd;
614	struct epoll_event		event;
615};
616
617struct io_splice {
618	struct file			*file_out;
619	struct file			*file_in;
620	loff_t				off_out;
621	loff_t				off_in;
622	u64				len;
623	unsigned int			flags;
624};
625
626struct io_provide_buf {
627	struct file			*file;
628	__u64				addr;
629	__u32				len;
630	__u32				bgid;
631	__u16				nbufs;
632	__u16				bid;
633};
634
635struct io_statx {
636	struct file			*file;
637	int				dfd;
638	unsigned int			mask;
639	unsigned int			flags;
640	const char __user		*filename;
641	struct statx __user		*buffer;
642};
643
644struct io_shutdown {
645	struct file			*file;
646	int				how;
647};
648
649struct io_rename {
650	struct file			*file;
651	int				old_dfd;
652	int				new_dfd;
653	struct filename			*oldpath;
654	struct filename			*newpath;
655	int				flags;
656};
657
658struct io_unlink {
659	struct file			*file;
660	int				dfd;
661	int				flags;
662	struct filename			*filename;
663};
664
665struct io_completion {
666	struct file			*file;
667	u32				cflags;
668};
669
670struct io_async_connect {
671	struct sockaddr_storage		address;
672};
673
674struct io_async_msghdr {
675	struct iovec			fast_iov[UIO_FASTIOV];
676	/* points to an allocated iov, if NULL we use fast_iov instead */
677	struct iovec			*free_iov;
678	struct sockaddr __user		*uaddr;
679	struct msghdr			msg;
680	struct sockaddr_storage		addr;
681};
682
683struct io_async_rw {
684	struct iovec			fast_iov[UIO_FASTIOV];
685	const struct iovec		*free_iovec;
686	struct iov_iter			iter;
687	size_t				bytes_done;
688	struct wait_page_queue		wpq;
689};
690
691enum {
692	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
693	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
694	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
695	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
696	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
697	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
698
699	/* first byte is taken by user flags, shift it to not overlap */
700	REQ_F_FAIL_BIT		= 8,
701	REQ_F_INFLIGHT_BIT,
702	REQ_F_CUR_POS_BIT,
703	REQ_F_NOWAIT_BIT,
704	REQ_F_LINK_TIMEOUT_BIT,
705	REQ_F_NEED_CLEANUP_BIT,
706	REQ_F_POLLED_BIT,
707	REQ_F_BUFFER_SELECTED_BIT,
708	REQ_F_COMPLETE_INLINE_BIT,
709	REQ_F_REISSUE_BIT,
710	REQ_F_DONT_REISSUE_BIT,
711	REQ_F_CREDS_BIT,
712	REQ_F_REFCOUNT_BIT,
713	/* keep async read/write and isreg together and in order */
714	REQ_F_NOWAIT_READ_BIT,
715	REQ_F_NOWAIT_WRITE_BIT,
716	REQ_F_ISREG_BIT,
717
718	/* not a real bit, just to check we're not overflowing the space */
719	__REQ_F_LAST_BIT,
720};
721
722enum {
723	/* ctx owns file */
724	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
725	/* drain existing IO first */
726	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
727	/* linked sqes */
728	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
729	/* doesn't sever on completion < 0 */
730	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
731	/* IOSQE_ASYNC */
732	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
733	/* IOSQE_BUFFER_SELECT */
734	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),
735
736	/* fail rest of links */
737	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
738	/* on inflight list, should be cancelled and waited on exit reliably */
739	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
740	/* read/write uses file position */
741	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
742	/* must not punt to workers */
743	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
744	/* has or had linked timeout */
745	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
746	/* needs cleanup */
747	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
748	/* already went through poll handler */
749	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
750	/* buffer already selected */
751	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
752	/* completion is deferred through io_comp_state */
753	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
754	/* caller should reissue async */
755	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
756	/* don't attempt request reissue, see io_rw_reissue() */
757	REQ_F_DONT_REISSUE	= BIT(REQ_F_DONT_REISSUE_BIT),
758	/* supports async reads */
759	REQ_F_NOWAIT_READ	= BIT(REQ_F_NOWAIT_READ_BIT),
760	/* supports async writes */
761	REQ_F_NOWAIT_WRITE	= BIT(REQ_F_NOWAIT_WRITE_BIT),
762	/* regular file */
763	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
764	/* has creds assigned */
765	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
766	/* skip refcounting if not set */
767	REQ_F_REFCOUNT		= BIT(REQ_F_REFCOUNT_BIT),
768};
769
770struct async_poll {
771	struct io_poll_iocb	poll;
772	struct io_poll_iocb	*double_poll;
773};
774
775typedef void (*io_req_tw_func_t)(struct io_kiocb *req);
776
777struct io_task_work {
778	union {
779		struct io_wq_work_node	node;
780		struct llist_node	fallback_node;
781	};
782	io_req_tw_func_t		func;
783};
784
785enum {
786	IORING_RSRC_FILE		= 0,
787	IORING_RSRC_BUFFER		= 1,
788};
789
790/*
791 * NOTE! Each of the iocb union members has the file pointer
792 * as the first entry in their struct definition. So you can
793 * access the file pointer through any of the sub-structs,
794 * or directly as just 'file' in this struct.
795 */
796struct io_kiocb {
797	union {
798		struct file		*file;
799		struct io_rw		rw;
800		struct io_poll_iocb	poll;
801		struct io_poll_update	poll_update;
802		struct io_accept	accept;
803		struct io_sync		sync;
804		struct io_cancel	cancel;
805		struct io_timeout	timeout;
806		struct io_timeout_rem	timeout_rem;
807		struct io_connect	connect;
808		struct io_sr_msg	sr_msg;
809		struct io_open		open;
810		struct io_close		close;
811		struct io_rsrc_update	rsrc_update;
812		struct io_fadvise	fadvise;
813		struct io_madvise	madvise;
814		struct io_epoll		epoll;
815		struct io_splice	splice;
816		struct io_provide_buf	pbuf;
817		struct io_statx		statx;
818		struct io_shutdown	shutdown;
819		struct io_rename	rename;
820		struct io_unlink	unlink;
821		/* use only after cleaning per-op data, see io_clean_op() */
822		struct io_completion	compl;
823	};
824
825	/* opcode allocated if it needs to store data for async defer */
826	void				*async_data;
827	u8				opcode;
828	/* polled IO has completed */
829	u8				iopoll_completed;
830
831	u16				buf_index;
832	u32				result;
833
834	struct io_ring_ctx		*ctx;
835	unsigned int			flags;
836	atomic_t			refs;
837	struct task_struct		*task;
838	u64				user_data;
839
840	struct io_kiocb			*link;
841	struct percpu_ref		*fixed_rsrc_refs;
842
843	/* used with ctx->iopoll_list with reads/writes */
844	struct list_head		inflight_entry;
845	struct io_task_work		io_task_work;
846	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
847	struct hlist_node		hash_node;
848	struct async_poll		*apoll;
849	struct io_wq_work		work;
850	const struct cred		*creds;
851
852	/* store used ubuf, so we can prevent reloading */
853	struct io_mapped_ubuf		*imu;
854};
855
856struct io_tctx_node {
857	struct list_head	ctx_node;
858	struct task_struct	*task;
859	struct io_ring_ctx	*ctx;
860};
861
862struct io_defer_entry {
863	struct list_head	list;
864	struct io_kiocb		*req;
865	u32			seq;
866};
867
868struct io_op_def {
869	/* needs req->file assigned */
870	unsigned		needs_file : 1;
871	/* hash wq insertion if file is a regular file */
872	unsigned		hash_reg_file : 1;
873	/* unbound wq insertion if file is a non-regular file */
874	unsigned		unbound_nonreg_file : 1;
875	/* opcode is not supported by this kernel */
876	unsigned		not_supported : 1;
877	/* set if opcode supports polled "wait" */
878	unsigned		pollin : 1;
879	unsigned		pollout : 1;
880	/* op supports buffer selection */
881	unsigned		buffer_select : 1;
882	/* do prep async if it is going to be punted */
883	unsigned		needs_async_setup : 1;
884	/* should be issued under a block plug */
885	unsigned		plug : 1;
886	/* size of async data needed, if any */
887	unsigned short		async_size;
888};
889
890static const struct io_op_def io_op_defs[] = {
891	[IORING_OP_NOP] = {},
892	[IORING_OP_READV] = {
893		.needs_file		= 1,
894		.unbound_nonreg_file	= 1,
895		.pollin			= 1,
896		.buffer_select		= 1,
897		.needs_async_setup	= 1,
898		.plug			= 1,
899		.async_size		= sizeof(struct io_async_rw),
900	},
901	[IORING_OP_WRITEV] = {
902		.needs_file		= 1,
903		.hash_reg_file		= 1,
904		.unbound_nonreg_file	= 1,
905		.pollout		= 1,
906		.needs_async_setup	= 1,
907		.plug			= 1,
908		.async_size		= sizeof(struct io_async_rw),
909	},
910	[IORING_OP_FSYNC] = {
911		.needs_file		= 1,
912	},
913	[IORING_OP_READ_FIXED] = {
914		.needs_file		= 1,
915		.unbound_nonreg_file	= 1,
916		.pollin			= 1,
917		.plug			= 1,
918		.async_size		= sizeof(struct io_async_rw),
919	},
920	[IORING_OP_WRITE_FIXED] = {
921		.needs_file		= 1,
922		.hash_reg_file		= 1,
923		.unbound_nonreg_file	= 1,
924		.pollout		= 1,
925		.plug			= 1,
926		.async_size		= sizeof(struct io_async_rw),
927	},
928	[IORING_OP_POLL_ADD] = {
929		.needs_file		= 1,
930		.unbound_nonreg_file	= 1,
931	},
932	[IORING_OP_POLL_REMOVE] = {},
933	[IORING_OP_SYNC_FILE_RANGE] = {
934		.needs_file		= 1,
935	},
936	[IORING_OP_SENDMSG] = {
937		.needs_file		= 1,
938		.unbound_nonreg_file	= 1,
939		.pollout		= 1,
940		.needs_async_setup	= 1,
941		.async_size		= sizeof(struct io_async_msghdr),
942	},
943	[IORING_OP_RECVMSG] = {
944		.needs_file		= 1,
945		.unbound_nonreg_file	= 1,
946		.pollin			= 1,
947		.buffer_select		= 1,
948		.needs_async_setup	= 1,
949		.async_size		= sizeof(struct io_async_msghdr),
950	},
951	[IORING_OP_TIMEOUT] = {
952		.async_size		= sizeof(struct io_timeout_data),
953	},
954	[IORING_OP_TIMEOUT_REMOVE] = {
955		/* used by timeout updates' prep() */
956	},
957	[IORING_OP_ACCEPT] = {
958		.needs_file		= 1,
959		.unbound_nonreg_file	= 1,
960		.pollin			= 1,
961	},
962	[IORING_OP_ASYNC_CANCEL] = {},
963	[IORING_OP_LINK_TIMEOUT] = {
964		.async_size		= sizeof(struct io_timeout_data),
965	},
966	[IORING_OP_CONNECT] = {
967		.needs_file		= 1,
968		.unbound_nonreg_file	= 1,
969		.pollout		= 1,
970		.needs_async_setup	= 1,
971		.async_size		= sizeof(struct io_async_connect),
972	},
973	[IORING_OP_FALLOCATE] = {
974		.needs_file		= 1,
975	},
976	[IORING_OP_OPENAT] = {},
977	[IORING_OP_CLOSE] = {},
978	[IORING_OP_FILES_UPDATE] = {},
979	[IORING_OP_STATX] = {},
980	[IORING_OP_READ] = {
981		.needs_file		= 1,
982		.unbound_nonreg_file	= 1,
983		.pollin			= 1,
984		.buffer_select		= 1,
985		.plug			= 1,
986		.async_size		= sizeof(struct io_async_rw),
987	},
988	[IORING_OP_WRITE] = {
989		.needs_file		= 1,
990		.unbound_nonreg_file	= 1,
991		.pollout		= 1,
992		.plug			= 1,
993		.async_size		= sizeof(struct io_async_rw),
994	},
995	[IORING_OP_FADVISE] = {
996		.needs_file		= 1,
997	},
998	[IORING_OP_MADVISE] = {},
999	[IORING_OP_SEND] = {
1000		.needs_file		= 1,
1001		.unbound_nonreg_file	= 1,
1002		.pollout		= 1,
1003	},
1004	[IORING_OP_RECV] = {
1005		.needs_file		= 1,
1006		.unbound_nonreg_file	= 1,
1007		.pollin			= 1,
1008		.buffer_select		= 1,
1009	},
1010	[IORING_OP_OPENAT2] = {
1011	},
1012	[IORING_OP_EPOLL_CTL] = {
1013		.unbound_nonreg_file	= 1,
1014	},
1015	[IORING_OP_SPLICE] = {
1016		.needs_file		= 1,
1017		.hash_reg_file		= 1,
1018		.unbound_nonreg_file	= 1,
1019	},
1020	[IORING_OP_PROVIDE_BUFFERS] = {},
1021	[IORING_OP_REMOVE_BUFFERS] = {},
1022	[IORING_OP_TEE] = {
1023		.needs_file		= 1,
1024		.hash_reg_file		= 1,
1025		.unbound_nonreg_file	= 1,
1026	},
1027	[IORING_OP_SHUTDOWN] = {
1028		.needs_file		= 1,
1029	},
1030	[IORING_OP_RENAMEAT] = {},
1031	[IORING_OP_UNLINKAT] = {},
1032};
1033
1034static bool io_disarm_next(struct io_kiocb *req);
1035static void io_uring_del_tctx_node(unsigned long index);
1036static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
1037					 struct task_struct *task,
1038					 bool cancel_all);
1039static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
1040
1041static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1042				 long res, unsigned int cflags);
1043static void io_put_req(struct io_kiocb *req);
1044static void io_put_req_deferred(struct io_kiocb *req);
1045static void io_dismantle_req(struct io_kiocb *req);
1046static void io_queue_linked_timeout(struct io_kiocb *req);
1047static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
1048				     struct io_uring_rsrc_update2 *up,
1049				     unsigned nr_args);
1050static void io_clean_op(struct io_kiocb *req);
1051static struct file *io_file_get(struct io_ring_ctx *ctx,
1052				struct io_kiocb *req, int fd, bool fixed);
1053static void __io_queue_sqe(struct io_kiocb *req);
1054static void io_rsrc_put_work(struct work_struct *work);
1055
1056static void io_req_task_queue(struct io_kiocb *req);
1057static void io_submit_flush_completions(struct io_ring_ctx *ctx);
1058static int io_req_prep_async(struct io_kiocb *req);
1059
1060static struct kmem_cache *req_cachep;
1061
1062static const struct file_operations io_uring_fops;
1063
1064struct sock *io_uring_get_socket(struct file *file)
1065{
1066#if defined(CONFIG_UNIX)
1067	if (file->f_op == &io_uring_fops) {
1068		struct io_ring_ctx *ctx = file->private_data;
1069
1070		return ctx->ring_sock->sk;
1071	}
1072#endif
1073	return NULL;
1074}
1075EXPORT_SYMBOL(io_uring_get_socket);
1076
1077#define io_for_each_link(pos, head) \
1078	for (pos = (head); pos; pos = pos->link)
1079
1080/*
1081 * Shamelessly stolen from the mm implementation of page reference checking,
1082 * see commit f958d7b528b1 for details.
1083 */
1084#define req_ref_zero_or_close_to_overflow(req)	\
1085	((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
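
/*
 * A sketch of what the check above catches, assuming the usual 32-bit
 * unsigned wraparound: adding 127u maps "zero, or already wrapped into the
 * top 127 values" back into [0, 127], and nothing else lands there.
 *
 *	refs == 0              -> 0 + 127 == 127       -> true  (underflow?)
 *	refs == 1              -> 1 + 127 == 128       -> false
 *	refs == UINT_MAX       -> wraps to 126         -> true  (overflow)
 *	refs == UINT_MAX - 126 -> wraps to 0           -> true
 *	refs == UINT_MAX - 127 -> UINT_MAX, no wrap    -> false
 */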
1086
1087static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
1088{
1089	WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
1090	return atomic_inc_not_zero(&req->refs);
1091}
1092
1093static inline bool req_ref_put_and_test(struct io_kiocb *req)
1094{
1095	if (likely(!(req->flags & REQ_F_REFCOUNT)))
1096		return true;
1097
1098	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1099	return atomic_dec_and_test(&req->refs);
1100}
1101
1102static inline void req_ref_put(struct io_kiocb *req)
1103{
1104	WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
1105	WARN_ON_ONCE(req_ref_put_and_test(req));
1106}
1107
1108static inline void req_ref_get(struct io_kiocb *req)
1109{
1110	WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
1111	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1112	atomic_inc(&req->refs);
1113}
1114
1115static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
1116{
1117	if (!(req->flags & REQ_F_REFCOUNT)) {
1118		req->flags |= REQ_F_REFCOUNT;
1119		atomic_set(&req->refs, nr);
1120	}
1121}
1122
1123static inline void io_req_set_refcount(struct io_kiocb *req)
1124{
1125	__io_req_set_refcount(req, 1);
1126}
1127
1128static inline void io_req_set_rsrc_node(struct io_kiocb *req)
1129{
1130	struct io_ring_ctx *ctx = req->ctx;
1131
1132	if (!req->fixed_rsrc_refs) {
1133		req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
1134		percpu_ref_get(req->fixed_rsrc_refs);
1135	}
1136}
1137
1138static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
1139{
1140	bool got = percpu_ref_tryget(ref);
1141
1142	/* already at zero, wait for ->release() */
1143	if (!got)
1144		wait_for_completion(compl);
1145	percpu_ref_resurrect(ref);
1146	if (got)
1147		percpu_ref_put(ref);
1148}
1149
1150static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
1151			  bool cancel_all)
1152{
1153	struct io_kiocb *req;
1154
1155	if (task && head->task != task)
1156		return false;
1157	if (cancel_all)
1158		return true;
1159
1160	io_for_each_link(req, head) {
1161		if (req->flags & REQ_F_INFLIGHT)
1162			return true;
1163	}
1164	return false;
1165}
1166
1167static inline void req_set_fail(struct io_kiocb *req)
1168{
1169	req->flags |= REQ_F_FAIL;
1170}
1171
1172static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1173{
1174	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1175
1176	complete(&ctx->ref_comp);
1177}
1178
1179static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1180{
1181	return !req->timeout.off;
1182}
1183
1184static void io_fallback_req_func(struct work_struct *work)
1185{
1186	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
1187						fallback_work.work);
1188	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
1189	struct io_kiocb *req, *tmp;
1190
1191	percpu_ref_get(&ctx->refs);
1192	llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
1193		req->io_task_work.func(req);
1194	percpu_ref_put(&ctx->refs);
1195}
1196
1197static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1198{
1199	struct io_ring_ctx *ctx;
1200	int hash_bits;
1201
1202	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1203	if (!ctx)
1204		return NULL;
1205
1206	/*
1207	 * Use 5 bits fewer than the max CQ entries; that should give us around
1208	 * 32 entries per hash list if totally full and uniformly spread.
1209	 */
1210	hash_bits = ilog2(p->cq_entries);
1211	hash_bits -= 5;
1212	if (hash_bits <= 0)
1213		hash_bits = 1;
1214	ctx->cancel_hash_bits = hash_bits;
1215	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1216					GFP_KERNEL);
1217	if (!ctx->cancel_hash)
1218		goto err;
1219	__hash_init(ctx->cancel_hash, 1U << hash_bits);
1220
1221	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
1222	if (!ctx->dummy_ubuf)
1223		goto err;
1224	/* set an invalid range so that io_import_fixed() fails when it meets it */
1225	ctx->dummy_ubuf->ubuf = -1UL;
1226
1227	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
1228			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1229		goto err;
1230
1231	ctx->flags = p->flags;
1232	init_waitqueue_head(&ctx->sqo_sq_wait);
1233	INIT_LIST_HEAD(&ctx->sqd_list);
1234	init_waitqueue_head(&ctx->poll_wait);
1235	INIT_LIST_HEAD(&ctx->cq_overflow_list);
1236	init_completion(&ctx->ref_comp);
1237	xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
1238	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
1239	mutex_init(&ctx->uring_lock);
1240	init_waitqueue_head(&ctx->cq_wait);
1241	spin_lock_init(&ctx->completion_lock);
1242	spin_lock_init(&ctx->timeout_lock);
1243	INIT_LIST_HEAD(&ctx->iopoll_list);
1244	INIT_LIST_HEAD(&ctx->defer_list);
1245	INIT_LIST_HEAD(&ctx->timeout_list);
1246	spin_lock_init(&ctx->rsrc_ref_lock);
1247	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
1248	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1249	init_llist_head(&ctx->rsrc_put_llist);
1250	INIT_LIST_HEAD(&ctx->tctx_list);
1251	INIT_LIST_HEAD(&ctx->submit_state.free_list);
1252	INIT_LIST_HEAD(&ctx->locked_free_list);
1253	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
1254	return ctx;
1255err:
1256	kfree(ctx->dummy_ubuf);
1257	kfree(ctx->cancel_hash);
1258	kfree(ctx);
1259	return NULL;
1260}
1261
1262static void io_account_cq_overflow(struct io_ring_ctx *ctx)
1263{
1264	struct io_rings *r = ctx->rings;
1265
1266	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
1267	ctx->cq_extra--;
1268}
1269
1270static bool req_need_defer(struct io_kiocb *req, u32 seq)
1271{
1272	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1273		struct io_ring_ctx *ctx = req->ctx;
1274
1275		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
1276	}
1277
1278	return false;
1279}
1280
1281#define FFS_ASYNC_READ		0x1UL
1282#define FFS_ASYNC_WRITE		0x2UL
1283#ifdef CONFIG_64BIT
1284#define FFS_ISREG		0x4UL
1285#else
1286#define FFS_ISREG		0x0UL
1287#endif
1288#define FFS_MASK		~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
1289
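/*
 * Sketch of the encoding these FFS_* bits imply for io_fixed_file->file_ptr:
 * a struct file pointer is aligned well past the low three bits, so they can
 * cache per-file properties alongside the pointer. The helpers below are
 * illustrative only, not the file-table accessors used later in this file.
 *
 *	static inline unsigned long ffs_pack(struct file *file, unsigned long ffs)
 *	{
 *		return (unsigned long)file | ffs;	// ffs holds FFS_* bits
 *	}
 *
 *	static inline struct file *ffs_unpack(unsigned long file_ptr)
 *	{
 *		return (struct file *)(file_ptr & FFS_MASK);
 *	}
 */
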
1290static inline bool io_req_ffs_set(struct io_kiocb *req)
1291{
1292	return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE);
1293}
1294
1295static void io_req_track_inflight(struct io_kiocb *req)
1296{
1297	if (!(req->flags & REQ_F_INFLIGHT)) {
1298		req->flags |= REQ_F_INFLIGHT;
1299		atomic_inc(&current->io_uring->inflight_tracked);
1300	}
1301}
1302
1303static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
1304{
1305	struct io_kiocb *nxt = req->link;
1306
1307	if (req->flags & REQ_F_LINK_TIMEOUT)
1308		return NULL;
1309
1310	/* linked timeouts should have two refs once prep'ed */
1311	io_req_set_refcount(req);
1312	__io_req_set_refcount(nxt, 2);
1313
1314	nxt->timeout.head = req;
1315	req->flags |= REQ_F_LINK_TIMEOUT;
1316	return nxt;
1317}
1318
1319static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
1320{
1321	if (likely(!req->link || req->link->opcode != IORING_OP_LINK_TIMEOUT))
1322		return NULL;
1323	return __io_prep_linked_timeout(req);
1324}
1325
1326static void io_prep_async_work(struct io_kiocb *req)
1327{
1328	const struct io_op_def *def = &io_op_defs[req->opcode];
1329	struct io_ring_ctx *ctx = req->ctx;
1330
1331	if (!(req->flags & REQ_F_CREDS)) {
1332		req->flags |= REQ_F_CREDS;
1333		req->creds = get_current_cred();
1334	}
1335
1336	req->work.list.next = NULL;
1337	req->work.flags = 0;
1338	if (req->flags & REQ_F_FORCE_ASYNC)
1339		req->work.flags |= IO_WQ_WORK_CONCURRENT;
1340
1341	if (req->flags & REQ_F_ISREG) {
1342		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
1343			io_wq_hash_work(&req->work, file_inode(req->file));
1344	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
1345		if (def->unbound_nonreg_file)
1346			req->work.flags |= IO_WQ_WORK_UNBOUND;
1347	}
1348
1349	switch (req->opcode) {
1350	case IORING_OP_SPLICE:
1351	case IORING_OP_TEE:
1352		if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
1353			req->work.flags |= IO_WQ_WORK_UNBOUND;
1354		break;
1355	}
1356}
1357
1358static void io_prep_async_link(struct io_kiocb *req)
1359{
1360	struct io_kiocb *cur;
1361
1362	if (req->flags & REQ_F_LINK_TIMEOUT) {
1363		struct io_ring_ctx *ctx = req->ctx;
1364
1365		spin_lock(&ctx->completion_lock);
1366		io_for_each_link(cur, req)
1367			io_prep_async_work(cur);
1368		spin_unlock(&ctx->completion_lock);
1369	} else {
1370		io_for_each_link(cur, req)
1371			io_prep_async_work(cur);
1372	}
1373}
1374
1375static void io_queue_async_work(struct io_kiocb *req)
1376{
1377	struct io_ring_ctx *ctx = req->ctx;
1378	struct io_kiocb *link = io_prep_linked_timeout(req);
1379	struct io_uring_task *tctx = req->task->io_uring;
1380
1381	BUG_ON(!tctx);
1382	BUG_ON(!tctx->io_wq);
1383
1384	/* init ->work of the whole link before punting */
1385	io_prep_async_link(req);
1386
1387	/*
1388	 * Not expected to happen, but if we do have a bug where this _can_
1389	 * happen, catch it here and ensure the request is marked as
1390	 * canceled. That will make io-wq go through the usual work cancel
1391	 * procedure rather than attempt to run this request (or create a new
1392	 * worker for it).
1393	 */
1394	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
1395		req->work.flags |= IO_WQ_WORK_CANCEL;
1396
1397	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1398					&req->work, req->flags);
1399	io_wq_enqueue(tctx->io_wq, &req->work);
1400	if (link)
1401		io_queue_linked_timeout(link);
1402}
1403
1404static void io_kill_timeout(struct io_kiocb *req, int status)
1405	__must_hold(&req->ctx->completion_lock)
1406	__must_hold(&req->ctx->timeout_lock)
1407{
1408	struct io_timeout_data *io = req->async_data;
1409
1410	if (hrtimer_try_to_cancel(&io->timer) != -1) {
1411		atomic_set(&req->ctx->cq_timeouts,
1412			atomic_read(&req->ctx->cq_timeouts) + 1);
1413		list_del_init(&req->timeout.list);
1414		io_cqring_fill_event(req->ctx, req->user_data, status, 0);
1415		io_put_req_deferred(req);
1416	}
1417}
1418
1419static void io_queue_deferred(struct io_ring_ctx *ctx)
1420{
1421	while (!list_empty(&ctx->defer_list)) {
1422		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1423						struct io_defer_entry, list);
1424
1425		if (req_need_defer(de->req, de->seq))
1426			break;
1427		list_del_init(&de->list);
1428		io_req_task_queue(de->req);
1429		kfree(de);
1430	}
1431}
1432
1433static void io_flush_timeouts(struct io_ring_ctx *ctx)
1434	__must_hold(&ctx->completion_lock)
1435{
1436	u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
1437
1438	spin_lock_irq(&ctx->timeout_lock);
1439	while (!list_empty(&ctx->timeout_list)) {
1440		u32 events_needed, events_got;
1441		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
1442						struct io_kiocb, timeout.list);
1443
1444		if (io_is_timeout_noseq(req))
1445			break;
1446
1447		/*
1448		 * Since seq can easily wrap around over time, subtract
1449		 * the last seq at which timeouts were flushed before comparing.
1450		 * Assuming not more than 2^31-1 events have happened since,
1451		 * these subtractions won't have wrapped, so we can check if
1452		 * target is in [last_seq, current_seq] by comparing the two.
1453		 */
1454		events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1455		events_got = seq - ctx->cq_last_tm_flush;
1456		if (events_got < events_needed)
1457			break;
1458
1459		list_del_init(&req->timeout.list);
1460		io_kill_timeout(req, 0);
1461	}
1462	ctx->cq_last_tm_flush = seq;
1463	spin_unlock_irq(&ctx->timeout_lock);
1464}
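
/*
 * A standalone sketch of the wrap-safe comparison used above: with u32
 * sequence numbers, "has cur reached target since base?" is answered by
 * comparing the two distances from base, which stays correct as long as
 * fewer than 2^31 events separate them. Purely illustrative.
 *
 *	static inline bool seq_reached(u32 base, u32 target, u32 cur)
 *	{
 *		return (u32)(cur - base) >= (u32)(target - base);
 *	}
 *
 * e.g. base == 0xfffffff0, target == 0x5, cur == 0x8:
 *	cur - base == 0x18 >= target - base == 0x15, so the timeout fires.
 */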
1465
1466static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
1467{
1468	if (ctx->off_timeout_used)
1469		io_flush_timeouts(ctx);
1470	if (ctx->drain_active)
1471		io_queue_deferred(ctx);
1472}
1473
1474static inline void io_commit_cqring(struct io_ring_ctx *ctx)
1475{
1476	if (unlikely(ctx->off_timeout_used || ctx->drain_active))
1477		__io_commit_cqring_flush(ctx);
1478	/* order cqe stores with ring update */
1479	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
1480}
1481
1482static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1483{
1484	struct io_rings *r = ctx->rings;
1485
1486	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
1487}
1488
1489static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1490{
1491	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1492}
1493
1494static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
1495{
1496	struct io_rings *rings = ctx->rings;
1497	unsigned tail, mask = ctx->cq_entries - 1;
1498
1499	/*
1500	 * writes to the cq entry need to come after reading head; the
1501	 * control dependency is enough as we're using WRITE_ONCE to
1502	 * fill the cq entry
1503	 */
1504	if (__io_cqring_events(ctx) == ctx->cq_entries)
1505		return NULL;
1506
1507	tail = ctx->cached_cq_tail++;
1508	return &rings->cqes[tail & mask];
1509}
1510
1511static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1512{
1513	if (likely(!ctx->cq_ev_fd))
1514		return false;
1515	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1516		return false;
1517	return !ctx->eventfd_async || io_wq_current_is_worker();
1518}
1519
1520static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
1521{
1522	/*
1523	 * wake_up_all() may seem excessive, but io_wake_function() and
1524	 * io_should_wake() handle the termination of the loop and only
1525	 * wake as many waiters as we need to.
1526	 */
1527	if (wq_has_sleeper(&ctx->cq_wait))
1528		wake_up_all(&ctx->cq_wait);
1529	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1530		wake_up(&ctx->sq_data->wait);
1531	if (io_should_trigger_evfd(ctx))
1532		eventfd_signal(ctx->cq_ev_fd, 1);
1533	if (waitqueue_active(&ctx->poll_wait)) {
1534		wake_up_interruptible(&ctx->poll_wait);
1535		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1536	}
1537}
1538
1539static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1540{
1541	if (ctx->flags & IORING_SETUP_SQPOLL) {
1542		if (wq_has_sleeper(&ctx->cq_wait))
1543			wake_up_all(&ctx->cq_wait);
1544	}
1545	if (io_should_trigger_evfd(ctx))
1546		eventfd_signal(ctx->cq_ev_fd, 1);
1547	if (waitqueue_active(&ctx->poll_wait)) {
1548		wake_up_interruptible(&ctx->poll_wait);
1549		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1550	}
1551}
1552
1553/* Returns true if there are no backlogged entries after the flush */
1554static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
1555{
1556	bool all_flushed, posted;
1557
1558	if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
1559		return false;
1560
1561	posted = false;
1562	spin_lock(&ctx->completion_lock);
1563	while (!list_empty(&ctx->cq_overflow_list)) {
1564		struct io_uring_cqe *cqe = io_get_cqe(ctx);
1565		struct io_overflow_cqe *ocqe;
1566
1567		if (!cqe && !force)
1568			break;
1569		ocqe = list_first_entry(&ctx->cq_overflow_list,
1570					struct io_overflow_cqe, list);
1571		if (cqe)
1572			memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
1573		else
1574			io_account_cq_overflow(ctx);
1575
1576		posted = true;
1577		list_del(&ocqe->list);
1578		kfree(ocqe);
1579	}
1580
1581	all_flushed = list_empty(&ctx->cq_overflow_list);
1582	if (all_flushed) {
1583		clear_bit(0, &ctx->check_cq_overflow);
1584		WRITE_ONCE(ctx->rings->sq_flags,
1585			   ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
1586	}
1587
1588	if (posted)
1589		io_commit_cqring(ctx);
1590	spin_unlock(&ctx->completion_lock);
1591	if (posted)
1592		io_cqring_ev_posted(ctx);
1593	return all_flushed;
1594}
1595
1596static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
1597{
1598	bool ret = true;
1599
1600	if (test_bit(0, &ctx->check_cq_overflow)) {
1601		/* iopoll syncs against uring_lock, not completion_lock */
1602		if (ctx->flags & IORING_SETUP_IOPOLL)
1603			mutex_lock(&ctx->uring_lock);
1604		ret = __io_cqring_overflow_flush(ctx, false);
1605		if (ctx->flags & IORING_SETUP_IOPOLL)
1606			mutex_unlock(&ctx->uring_lock);
1607	}
1608
1609	return ret;
1610}
1611
1612/* must be called shortly after putting a request */
1613static inline void io_put_task(struct task_struct *task, int nr)
1614{
1615	struct io_uring_task *tctx = task->io_uring;
1616
1617	percpu_counter_sub(&tctx->inflight, nr);
1618	if (unlikely(atomic_read(&tctx->in_idle)))
1619		wake_up(&tctx->wait);
1620	put_task_struct_many(task, nr);
1621}
1622
1623static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
1624				     long res, unsigned int cflags)
1625{
1626	struct io_overflow_cqe *ocqe;
1627
1628	ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
1629	if (!ocqe) {
1630		/*
1631		 * If we're in ring overflow flush mode, or in task cancel mode,
1632		 * or cannot allocate an overflow entry, then we need to drop it
1633		 * on the floor.
1634		 */
1635		io_account_cq_overflow(ctx);
1636		return false;
1637	}
1638	if (list_empty(&ctx->cq_overflow_list)) {
1639		set_bit(0, &ctx->check_cq_overflow);
1640		WRITE_ONCE(ctx->rings->sq_flags,
1641			   ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
1642
1643	}
1644	ocqe->cqe.user_data = user_data;
1645	ocqe->cqe.res = res;
1646	ocqe->cqe.flags = cflags;
1647	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
1648	return true;
1649}
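
/*
 * Userspace-side note, sketched here for illustration: once
 * IORING_SQ_CQ_OVERFLOW is set above, the overflowed completions sit on
 * ctx->cq_overflow_list until the ring is re-entered, so a reaper typically
 * checks the SQ flags and enters the kernel to get them flushed (liburing
 * does something similar; the pointer names are assumed from the mmap
 * sketch earlier):
 *
 *	if (READ_ONCE(*sq_kflags) & IORING_SQ_CQ_OVERFLOW)
 *		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *			IORING_ENTER_GETEVENTS, NULL, 0);
 */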
1650
1651static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1652					  long res, unsigned int cflags)
1653{
1654	struct io_uring_cqe *cqe;
1655
1656	trace_io_uring_complete(ctx, user_data, res, cflags);
1657
1658	/*
1659	 * If we can't get a cq entry, userspace overflowed the
1660	 * submission (by quite a lot). Increment the overflow count in
1661	 * the ring.
1662	 */
1663	cqe = io_get_cqe(ctx);
1664	if (likely(cqe)) {
1665		WRITE_ONCE(cqe->user_data, user_data);
1666		WRITE_ONCE(cqe->res, res);
1667		WRITE_ONCE(cqe->flags, cflags);
1668		return true;
1669	}
1670	return io_cqring_event_overflow(ctx, user_data, res, cflags);
1671}
1672
1673/* not hot enough to be worth bloating callers with inlining */
1674static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1675					  long res, unsigned int cflags)
1676{
1677	return __io_cqring_fill_event(ctx, user_data, res, cflags);
1678}
1679
1680static void io_req_complete_post(struct io_kiocb *req, long res,
1681				 unsigned int cflags)
1682{
1683	struct io_ring_ctx *ctx = req->ctx;
1684
1685	spin_lock(&ctx->completion_lock);
1686	__io_cqring_fill_event(ctx, req->user_data, res, cflags);
1687	/*
1688	 * If we're the last reference to this request, add to our locked
1689	 * free_list cache.
1690	 */
1691	if (req_ref_put_and_test(req)) {
1692		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
1693			if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL))
1694				io_disarm_next(req);
1695			if (req->link) {
1696				io_req_task_queue(req->link);
1697				req->link = NULL;
1698			}
1699		}
1700		io_dismantle_req(req);
1701		io_put_task(req->task, 1);
1702		list_add(&req->inflight_entry, &ctx->locked_free_list);
1703		ctx->locked_free_nr++;
1704	} else {
1705		if (!percpu_ref_tryget(&ctx->refs))
1706			req = NULL;
1707	}
1708	io_commit_cqring(ctx);
1709	spin_unlock(&ctx->completion_lock);
1710
1711	if (req) {
1712		io_cqring_ev_posted(ctx);
1713		percpu_ref_put(&ctx->refs);
1714	}
1715}
1716
1717static inline bool io_req_needs_clean(struct io_kiocb *req)
1718{
1719	return req->flags & IO_REQ_CLEAN_FLAGS;
1720}
1721
1722static void io_req_complete_state(struct io_kiocb *req, long res,
1723				  unsigned int cflags)
1724{
1725	if (io_req_needs_clean(req))
1726		io_clean_op(req);
1727	req->result = res;
1728	req->compl.cflags = cflags;
1729	req->flags |= REQ_F_COMPLETE_INLINE;
1730}
1731
1732static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1733				     long res, unsigned cflags)
1734{
1735	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1736		io_req_complete_state(req, res, cflags);
1737	else
1738		io_req_complete_post(req, res, cflags);
1739}
1740
1741static inline void io_req_complete(struct io_kiocb *req, long res)
1742{
1743	__io_req_complete(req, 0, res, 0);
1744}
1745
1746static void io_req_complete_failed(struct io_kiocb *req, long res)
1747{
1748	req_set_fail(req);
1749	io_req_complete_post(req, res, 0);
1750}
1751
1752/*
1753 * Don't initialise the fields below on every allocation, but do that in
1754 * advance and keep them valid across allocations.
1755 */
1756static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1757{
1758	req->ctx = ctx;
1759	req->link = NULL;
1760	req->async_data = NULL;
1761	/* not necessary, but safer to zero */
1762	req->result = 0;
1763}
1764
1765static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
1766					struct io_submit_state *state)
1767{
1768	spin_lock(&ctx->completion_lock);
1769	list_splice_init(&ctx->locked_free_list, &state->free_list);
1770	ctx->locked_free_nr = 0;
1771	spin_unlock(&ctx->completion_lock);
1772}
1773
1774/* Returns true IFF there are requests in the cache */
1775static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
1776{
1777	struct io_submit_state *state = &ctx->submit_state;
1778	int nr;
1779
1780	/*
1781	 * If we have more than a batch's worth of requests in our IRQ side
1782	 * locked cache, grab the lock and move them over to our submission
1783	 * side cache.
1784	 */
1785	if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
1786		io_flush_cached_locked_reqs(ctx, state);
1787
1788	nr = state->free_reqs;
1789	while (!list_empty(&state->free_list)) {
1790		struct io_kiocb *req = list_first_entry(&state->free_list,
1791					struct io_kiocb, inflight_entry);
1792
1793		list_del(&req->inflight_entry);
1794		state->reqs[nr++] = req;
1795		if (nr == ARRAY_SIZE(state->reqs))
1796			break;
1797	}
1798
1799	state->free_reqs = nr;
1800	return nr != 0;
1801}
1802
1803/*
1804 * A request might get retired back into the request caches even before opcode
1805 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
1806 * Because of that, io_alloc_req() should be called only under ->uring_lock
1807 * and with extra caution to not get a request that is still worked on.
1808 */
1809static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
1810	__must_hold(&ctx->uring_lock)
1811{
1812	struct io_submit_state *state = &ctx->submit_state;
1813	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1814	int ret, i;
1815
1816	BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
1817
1818	if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
1819		goto got_req;
1820
1821	ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1822				    state->reqs);
1823
1824	/*
1825	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1826	 * retry single alloc to be on the safe side.
1827	 */
1828	if (unlikely(ret <= 0)) {
1829		state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1830		if (!state->reqs[0])
1831			return NULL;
1832		ret = 1;
1833	}
1834
1835	for (i = 0; i < ret; i++)
1836		io_preinit_req(state->reqs[i], ctx);
1837	state->free_reqs = ret;
1838got_req:
1839	state->free_reqs--;
1840	return state->reqs[state->free_reqs];
1841}
1842
1843static inline void io_put_file(struct file *file)
1844{
1845	if (file)
1846		fput(file);
1847}
1848
1849static void io_dismantle_req(struct io_kiocb *req)
1850{
1851	unsigned int flags = req->flags;
1852
1853	if (io_req_needs_clean(req))
1854		io_clean_op(req);
1855	if (!(flags & REQ_F_FIXED_FILE))
1856		io_put_file(req->file);
1857	if (req->fixed_rsrc_refs)
1858		percpu_ref_put(req->fixed_rsrc_refs);
1859	if (req->async_data) {
1860		kfree(req->async_data);
1861		req->async_data = NULL;
1862	}
1863}
1864
1865static void __io_free_req(struct io_kiocb *req)
1866{
1867	struct io_ring_ctx *ctx = req->ctx;
1868
1869	io_dismantle_req(req);
1870	io_put_task(req->task, 1);
1871
1872	spin_lock(&ctx->completion_lock);
1873	list_add(&req->inflight_entry, &ctx->locked_free_list);
1874	ctx->locked_free_nr++;
1875	spin_unlock(&ctx->completion_lock);
1876
1877	percpu_ref_put(&ctx->refs);
1878}
1879
1880static inline void io_remove_next_linked(struct io_kiocb *req)
1881{
1882	struct io_kiocb *nxt = req->link;
1883
1884	req->link = nxt->link;
1885	nxt->link = NULL;
1886}
1887
1888static bool io_kill_linked_timeout(struct io_kiocb *req)
1889	__must_hold(&req->ctx->completion_lock)
1890	__must_hold(&req->ctx->timeout_lock)
1891{
1892	struct io_kiocb *link = req->link;
1893
1894	if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
1895		struct io_timeout_data *io = link->async_data;
1896
1897		io_remove_next_linked(req);
1898		link->timeout.head = NULL;
1899		if (hrtimer_try_to_cancel(&io->timer) != -1) {
1900			io_cqring_fill_event(link->ctx, link->user_data,
1901					     -ECANCELED, 0);
1902			io_put_req_deferred(link);
1903			return true;
1904		}
1905	}
1906	return false;
1907}
1908
1909static void io_fail_links(struct io_kiocb *req)
1910	__must_hold(&req->ctx->completion_lock)
1911{
1912	struct io_kiocb *nxt, *link = req->link;
1913
1914	req->link = NULL;
1915	while (link) {
1916		nxt = link->link;
1917		link->link = NULL;
1918
1919		trace_io_uring_fail_link(req, link);
1920		io_cqring_fill_event(link->ctx, link->user_data, -ECANCELED, 0);
1921		io_put_req_deferred(link);
1922		link = nxt;
1923	}
1924}
1925
1926static bool io_disarm_next(struct io_kiocb *req)
1927	__must_hold(&req->ctx->completion_lock)
1928{
1929	bool posted = false;
1930
1931	if (likely(req->flags & REQ_F_LINK_TIMEOUT)) {
1932		struct io_ring_ctx *ctx = req->ctx;
1933
1934		spin_lock_irq(&ctx->timeout_lock);
1935		posted = io_kill_linked_timeout(req);
1936		spin_unlock_irq(&ctx->timeout_lock);
1937	}
1938	if (unlikely((req->flags & REQ_F_FAIL) &&
1939		     !(req->flags & REQ_F_HARDLINK))) {
1940		posted |= (req->link != NULL);
1941		io_fail_links(req);
1942	}
1943	return posted;
1944}
1945
1946static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
1947{
1948	struct io_kiocb *nxt;
1949
1950	/*
1951	 * If LINK is set, we have dependent requests in this chain. If we
1952	 * didn't fail this request, queue the first one up, moving any other
1953	 * dependencies to the next request. In case of failure, fail the rest
1954	 * of the chain.
1955	 */
1956	if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL)) {
1957		struct io_ring_ctx *ctx = req->ctx;
1958		bool posted;
1959
1960		spin_lock(&ctx->completion_lock);
1961		posted = io_disarm_next(req);
1962		if (posted)
1963			io_commit_cqring(req->ctx);
1964		spin_unlock(&ctx->completion_lock);
1965		if (posted)
1966			io_cqring_ev_posted(ctx);
1967	}
1968	nxt = req->link;
1969	req->link = NULL;
1970	return nxt;
1971}
1972
1973static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
1974{
1975	if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
1976		return NULL;
1977	return __io_req_find_next(req);
1978}
1979
1980static void ctx_flush_and_put(struct io_ring_ctx *ctx)
1981{
1982	if (!ctx)
1983		return;
1984	if (ctx->submit_state.compl_nr) {
1985		mutex_lock(&ctx->uring_lock);
1986		io_submit_flush_completions(ctx);
1987		mutex_unlock(&ctx->uring_lock);
1988	}
1989	percpu_ref_put(&ctx->refs);
1990}
1991
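/*
 * task_work handler for an io_uring task context: drain the queued requests
 * and run each one's ->io_task_work.func. Batched completions are flushed
 * whenever we switch rings and once more when the list has been emptied.
 */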
1992static void tctx_task_work(struct callback_head *cb)
1993{
1994	struct io_ring_ctx *ctx = NULL;
1995	struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
1996						  task_work);
1997
1998	while (1) {
1999		struct io_wq_work_node *node;
2000
2001		spin_lock_irq(&tctx->task_lock);
2002		node = tctx->task_list.first;
2003		INIT_WQ_LIST(&tctx->task_list);
2004		if (!node)
2005			tctx->task_running = false;
2006		spin_unlock_irq(&tctx->task_lock);
2007		if (!node)
2008			break;
2009
2010		do {
2011			struct io_wq_work_node *next = node->next;
2012			struct io_kiocb *req = container_of(node, struct io_kiocb,
2013							    io_task_work.node);
2014
2015			if (req->ctx != ctx) {
2016				ctx_flush_and_put(ctx);
2017				ctx = req->ctx;
2018				percpu_ref_get(&ctx->refs);
2019			}
2020			req->io_task_work.func(req);
2021			node = next;
2022		} while (node);
2023
2024		cond_resched();
2025	}
2026
2027	ctx_flush_and_put(ctx);
2028}
2029
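/*
 * Queue a request for execution via task_work on the task that submitted
 * it. If the work can't be added (e.g. the task is exiting), the queued
 * requests are punted to the fallback workqueue instead.
 */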
2030static void io_req_task_work_add(struct io_kiocb *req)
2031{
2032	struct task_struct *tsk = req->task;
2033	struct io_uring_task *tctx = tsk->io_uring;
2034	enum task_work_notify_mode notify;
2035	struct io_wq_work_node *node;
2036	unsigned long flags;
2037	bool running;
2038
2039	WARN_ON_ONCE(!tctx);
2040
2041	spin_lock_irqsave(&tctx->task_lock, flags);
2042	wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
2043	running = tctx->task_running;
2044	if (!running)
2045		tctx->task_running = true;
2046	spin_unlock_irqrestore(&tctx->task_lock, flags);
2047
2048	/* task_work already pending, we're done */
2049	if (running)
2050		return;
2051
2052	/*
2053	 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2054	 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2055	 * processing task_work. There's no reliable way to tell if TWA_RESUME
2056	 * will do the job.
2057	 */
2058	notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
2059	if (!task_work_add(tsk, &tctx->task_work, notify)) {
2060		wake_up_process(tsk);
2061		return;
2062	}
2063
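	/*
	 * task_work_add() failed, likely because the task is exiting. Take
	 * everything we queued back off the list and punt it to the fallback
	 * work instead.
	 */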
2064	spin_lock_irqsave(&tctx->task_lock, flags);
2065	tctx->task_running = false;
2066	node = tctx->task_list.first;
2067	INIT_WQ_LIST(&tctx->task_list);
2068	spin_unlock_irqrestore(&tctx->task_lock, flags);
2069
2070	while (node) {
2071		req = container_of(node, struct io_kiocb, io_task_work.node);
2072		node = node->next;
2073		if (llist_add(&req->io_task_work.fallback_node,
2074			      &req->ctx->fallback_llist))
2075			schedule_delayed_work(&req->ctx->fallback_work, 1);
2076	}
2077}
2078
2079static void io_req_task_cancel(struct io_kiocb *req)
2080{
2081	struct io_ring_ctx *ctx = req->ctx;
2082
2083	/* ctx is guaranteed to stay alive while we hold uring_lock */
2084	mutex_lock(&ctx->uring_lock);
2085	io_req_complete_failed(req, req->result);
2086	mutex_unlock(&ctx->uring_lock);
2087}
2088
2089static void io_req_task_submit(struct io_kiocb *req)
2090{
2091	struct io_ring_ctx *ctx = req->ctx;
2092
2093	/* ctx stays valid until unlock, even if we drop all our ctx->refs */
2094	mutex_lock(&ctx->uring_lock);
2095	if (likely(!(req->task->flags & PF_EXITING)))
2096		__io_queue_sqe(req);
2097	else
2098		io_req_complete_failed(req, -EFAULT);
2099	mutex_unlock(&ctx->uring_lock);
2100}
2101
2102static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2103{
2104	req->result = ret;
2105	req->io_task_work.func = io_req_task_cancel;
2106	io_req_task_work_add(req);
2107}
2108
2109static void io_req_task_queue(struct io_kiocb *req)
2110{
2111	req->io_task_work.func = io_req_task_submit;
2112	io_req_task_work_add(req);
2113}
2114
2115static void io_req_task_queue_reissue(struct io_kiocb *req)
2116{
2117	req->io_task_work.func = io_queue_async_work;
2118	io_req_task_work_add(req);
2119}
2120
2121static inline void io_queue_next(struct io_kiocb *req)
2122{
2123	struct io_kiocb *nxt = io_req_find_next(req);
2124
2125	if (nxt)
2126		io_req_task_queue(nxt);
2127}
2128
2129static void io_free_req(struct io_kiocb *req)
2130{
2131	io_queue_next(req);
2132	__io_free_req(req);
2133}
2134
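/*
 * Batch state for freeing requests: task and ctx references are dropped in
 * bulk when the batch is finished rather than one request at a time.
 */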
2135struct req_batch {
2136	struct task_struct	*task;
2137	int			task_refs;
2138	int			ctx_refs;
2139};
2140
2141static inline void io_init_req_batch(struct req_batch *rb)
2142{
2143	rb->task_refs = 0;
2144	rb->ctx_refs = 0;
2145	rb->task = NULL;
2146}
2147
2148static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2149				     struct req_batch *rb)
2150{
2151	if (rb->ctx_refs)
2152		percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
2153	if (rb->task == current)
2154		current->io_uring->cached_refs += rb->task_refs;
2155	else if (rb->task)
2156		io_put_task(rb->task, rb->task_refs);
2157}
2158
2159static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2160			      struct io_submit_state *state)
2161{
2162	io_queue_next(req);
2163	io_dismantle_req(req);
2164
2165	if (req->task != rb->task) {
2166		if (rb->task)
2167			io_put_task(rb->task, rb->task_refs);
2168		rb->task = req->task;
2169		rb->task_refs = 0;
2170	}
2171	rb->task_refs++;
2172	rb->ctx_refs++;
2173
2174	if (state->free_reqs != ARRAY_SIZE(state->reqs))
2175		state->reqs[state->free_reqs++] = req;
2176	else
2177		list_add(&req->inflight_entry, &state->free_list);
2178}
2179
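/*
 * Post CQEs for all requests batched up in the submit state, then drop our
 * reference on each one, recycling requests we held the last reference to
 * back into the submit state free lists.
 */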
2180static void io_submit_flush_completions(struct io_ring_ctx *ctx)
2181	__must_hold(&ctx->uring_lock)
2182{
2183	struct io_submit_state *state = &ctx->submit_state;
2184	int i, nr = state->compl_nr;
2185	struct req_batch rb;
2186
2187	spin_lock(&ctx->completion_lock);
2188	for (i = 0; i < nr; i++) {
2189		struct io_kiocb *req = state->compl_reqs[i];
2190
2191		__io_cqring_fill_event(ctx, req->user_data, req->result,
2192					req->compl.cflags);
2193	}
2194	io_commit_cqring(ctx);
2195	spin_unlock(&ctx->completion_lock);
2196	io_cqring_ev_posted(ctx);
2197
2198	io_init_req_batch(&rb);
2199	for (i = 0; i < nr; i++) {
2200		struct io_kiocb *req = state->compl_reqs[i];
2201
2202		if (req_ref_put_and_test(req))
2203			io_req_free_batch(&rb, req, &ctx->submit_state);
2204	}
2205
2206	io_req_free_batch_finish(ctx, &rb);
2207	state->compl_nr = 0;
2208}
2209
2210/*
2211 * Drop reference to request, return next in chain (if there is one) if this
2212 * was the last reference to this request.
2213 */
2214static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
2215{
2216	struct io_kiocb *nxt = NULL;
2217
2218	if (req_ref_put_and_test(req)) {
2219		nxt = io_req_find_next(req);
2220		__io_free_req(req);
2221	}
2222	return nxt;
2223}
2224
2225static inline void io_put_req(struct io_kiocb *req)
2226{
2227	if (req_ref_put_and_test(req))
2228		io_free_req(req);
2229}
2230
2231static inline void io_put_req_deferred(struct io_kiocb *req)
2232{
2233	if (req_ref_put_and_test(req)) {
2234		req->io_task_work.func = io_free_req;
2235		io_req_task_work_add(req);
2236	}
2237}
2238
2239static unsigned io_cqring_events(struct io_ring_ctx *ctx)
2240{
2241	/* See comment at the top of this file */
2242	smp_rmb();
2243	return __io_cqring_events(ctx);
2244}
2245
2246static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2247{
2248	struct io_rings *rings = ctx->rings;
2249
2250	/* make sure SQ entry isn't read before tail */
2251	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2252}
2253
2254static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
2255{
2256	unsigned int cflags;
2257
2258	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2259	cflags |= IORING_CQE_F_BUFFER;
2260	req->flags &= ~REQ_F_BUFFER_SELECTED;
2261	kfree(kbuf);
2262	return cflags;
2263}
2264
2265static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2266{
2267	struct io_buffer *kbuf;
2268
2269	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2270	return io_put_kbuf(req, kbuf);
2271}
2272
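/*
 * Run any pending task_work for the current task. Returns true if there was
 * a signal notification or task_work to process.
 */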
2273static inline bool io_run_task_work(void)
2274{
2275	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
2276		__set_current_state(TASK_RUNNING);
2277		tracehook_notify_signal();
2278		return true;
2279	}
2280
2281	return false;
2282}
2283
2284/*
2285 * Find and free completed poll iocbs
2286 */
2287static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2288			       struct list_head *done)
2289{
2290	struct req_batch rb;
2291	struct io_kiocb *req;
2292
2293	/* order with ->result store in io_complete_rw_iopoll() */
2294	smp_rmb();
2295
2296	io_init_req_batch(&rb);
2297	while (!list_empty(done)) {
2298		int cflags = 0;
2299
2300		req = list_first_entry(done, struct io_kiocb, inflight_entry);
2301		list_del(&req->inflight_entry);
2302
2303		if (READ_ONCE(req->result) == -EAGAIN &&
2304		    !(req->flags & REQ_F_DONT_REISSUE)) {
2305			req->iopoll_completed = 0;
2306			io_req_task_queue_reissue(req);
2307			continue;
2308		}
2309
2310		if (req->flags & REQ_F_BUFFER_SELECTED)
2311			cflags = io_put_rw_kbuf(req);
2312
2313		__io_cqring_fill_event(ctx, req->user_data, req->result, cflags);
2314		(*nr_events)++;
2315
2316		if (req_ref_put_and_test(req))
2317			io_req_free_batch(&rb, req, &ctx->submit_state);
2318	}
2319
2320	io_commit_cqring(ctx);
2321	io_cqring_ev_posted_iopoll(ctx);
2322	io_req_free_batch_finish(ctx, &rb);
2323}
2324
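/*
 * Poll for completions on the requests in ->iopoll_list, moving finished
 * ones to a local 'done' list and reaping them in one batch. *nr_events is
 * bumped for each posted CQE; returns 0 or a negative error from ->iopoll().
 */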
2325static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2326			long min)
2327{
2328	struct io_kiocb *req, *tmp;
2329	LIST_HEAD(done);
2330	bool spin;
2331
2332	/*
2333	 * Only spin for completions if we don't have multiple devices hanging
2334	 * off our complete list, and we're under the requested amount.
2335	 */
2336	spin = !ctx->poll_multi_queue && *nr_events < min;
2337
2338	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
2339		struct kiocb *kiocb = &req->rw.kiocb;
2340		int ret;
2341
2342		/*
2343		 * Move completed and retryable entries to our local lists.
2344		 * If we find a request that requires polling, break out
2345		 * and complete those lists first, if we have entries there.
2346		 */
2347		if (READ_ONCE(req->iopoll_completed)) {
2348			list_move_tail(&req->inflight_entry, &done);
2349			continue;
2350		}
2351		if (!list_empty(&done))
2352			break;
2353
2354		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2355		if (unlikely(ret < 0))
2356			return ret;
2357		else if (ret)
2358			spin = false;
2359
2360		/* iopoll may have completed current req */
2361		if (READ_ONCE(req->iopoll_completed))
2362			list_move_tail(&req->inflight_entry, &done);
2363	}
2364
2365	if (!list_empty(&done))
2366		io_iopoll_complete(ctx, nr_events, &done);
2367
2368	return 0;
2369}
2370
2371/*
2372 * We can't just wait for polled events to come to us, we have to actively
2373 * find and complete them.
2374 */
2375static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
2376{
2377	if (!(ctx->flags & IORING_SETUP_IOPOLL))
2378		return;
2379
2380	mutex_lock(&ctx->uring_lock);
2381	while (!list_empty(&ctx->iopoll_list)) {
2382		unsigned int nr_events = 0;
2383
2384		io_do_iopoll(ctx, &nr_events, 0);
2385
2386		/* let it sleep and repeat later if we can't complete a request */
2387		if (nr_events == 0)
2388			break;
2389		/*
2390		 * Ensure we allow local-to-the-cpu processing to take place;
2391		 * in this case we need to ensure that we reap all events.
2392		 * Also let task_work, etc. make progress by releasing the mutex.
2393		 */
2394		if (need_resched()) {
2395			mutex_unlock(&ctx->uring_lock);
2396			cond_resched();
2397			mutex_lock(&ctx->uring_lock);
2398		}
2399	}
2400	mutex_unlock(&ctx->uring_lock);
2401}
2402
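/*
 * Reap IOPOLL completions until we have at least 'min' events, an error
 * occurs, or we need to reschedule. Takes uring_lock to serialise against
 * issue from punted submissions.
 */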
2403static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
2404{
2405	unsigned int nr_events = 0;
2406	int ret = 0;
2407
2408	/*
2409	 * We disallow the app entering submit/complete with polling, but we
2410	 * still need to lock the ring to prevent racing with polled issue
2411	 * that got punted to a workqueue.
2412	 */
2413	mutex_lock(&ctx->uring_lock);
2414	/*
2415	 * Don't enter poll loop if we already have events pending.
2416	 * If we do, we can potentially be spinning for commands that
2417	 * already triggered a CQE (e.g. in error).
2418	 */
2419	if (test_bit(0, &ctx->check_cq_overflow))
2420		__io_cqring_overflow_flush(ctx, false);
2421	if (io_cqring_events(ctx))
2422		goto out;
2423	do {
2424		/*
2425		 * If a submit got punted to a workqueue, we can have the
2426		 * application entering polling for a command before it gets
2427		 * issued. That app will hold the uring_lock for the duration
2428		 * of the poll right here, so we need to take a breather every
2429		 * now and then to ensure that the issue has a chance to add
2430		 * the poll to the issued list. Otherwise we can spin here
2431		 * forever, while the workqueue is stuck trying to acquire the
2432		 * very same mutex.
2433		 */
2434		if (list_empty(&ctx->iopoll_list)) {
2435			u32 tail = ctx->cached_cq_tail;
2436
2437			mutex_unlock(&ctx->uring_lock);
2438			io_run_task_work();
2439			mutex_lock(&ctx->uring_lock);
2440
2441			/* some requests don't go through iopoll_list */
2442			if (tail != ctx->cached_cq_tail ||
2443			    list_empty(&ctx->iopoll_list))
2444				break;
2445		}
2446		ret = io_do_iopoll(ctx, &nr_events, min);
2447	} while (!ret && nr_events < min && !need_resched());
2448out:
2449	mutex_unlock(&ctx->uring_lock);
2450	return ret;
2451}
2452
2453static void kiocb_end_write(struct io_kiocb *req)
2454{
2455	/*
2456	 * Tell lockdep we inherited freeze protection from submission
2457	 * thread.
2458	 */
2459	if (req->flags & REQ_F_ISREG) {
2460		struct super_block *sb = file_inode(req->file)->i_sb;
2461
2462		__sb_writers_acquired(sb, SB_FREEZE_WRITE);
2463		sb_end_write(sb);
2464	}
2465}
2466
2467#ifdef CONFIG_BLOCK
2468static bool io_resubmit_prep(struct io_kiocb *req)
2469{
2470	struct io_async_rw *rw = req->async_data;
2471
2472	if (!rw)
2473		return !io_req_prep_async(req);
2474	/* may have left rw->iter inconsistent on -EIOCBQUEUED */
2475	iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
2476	return true;
2477}
2478
2479static bool io_rw_should_reissue(struct io_kiocb *req)
2480{
2481	umode_t mode = file_inode(req->file)->i_mode;
2482	struct io_ring_ctx *ctx = req->ctx;
2483
2484	if (!S_ISBLK(mode) && !S_ISREG(mode))
2485		return false;
2486	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2487	    !(ctx->flags & IORING_SETUP_IOPOLL)))
2488		return false;
2489	/*
2490	 * If ref is dying, we might be running poll reap from the exit work.
2491	 * Don't attempt to reissue from that path, just let it fail with
2492	 * -EAGAIN.
2493	 */
2494	if (percpu_ref_is_dying(&ctx->refs))
2495		return false;
2496	/*
2497	 * Play it safe and assume not safe to re-import and reissue if we're
2498	 * not in the original thread group (or not in task context).
2499	 */
2500	if (!same_thread_group(req->task, current) || !in_task())
2501		return false;
2502	return true;
2503}
2504#else
2505static bool io_resubmit_prep(struct io_kiocb *req)
2506{
2507	return false;
2508}
2509static bool io_rw_should_reissue(struct io_kiocb *req)
2510{
2511	return false;
2512}
2513#endif
2514
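/*
 * Common read/write completion handling. Returns true if the request should
 * be reissued (REQ_F_REISSUE set) instead of being completed now.
 */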
2515static bool __io_complete_rw_common(struct io_kiocb *req, long res)
2516{
2517	if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2518		kiocb_end_write(req);
2519	if (res != req->result) {
2520		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
2521		    io_rw_should_reissue(req)) {
2522			req->flags |= REQ_F_REISSUE;
2523			return true;
2524		}
2525		req_set_fail(req);
2526		req->result = res;
2527	}
2528	return false;
2529}
2530
2531static void io_req_task_complete(struct io_kiocb *req)
2532{
2533	int cflags = 0;
2534
2535	if (req->flags & REQ_F_BUFFER_SELECTED)
2536		cflags = io_put_rw_kbuf(req);
2537	__io_req_complete(req, 0, req->result, cflags);
2538}
2539
2540static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2541			     unsigned int issue_flags)
2542{
2543	if (__io_complete_rw_common(req, res))
2544		return;
2545	io_req_task_complete(req);
2546}
2547
2548static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2549{
2550	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2551
2552	if (__io_complete_rw_common(req, res))
2553		return;
2554	req->result = res;
2555	req->io_task_work.func = io_req_task_complete;
2556	io_req_task_work_add(req);
2557}
2558
2559static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2560{
2561	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2562
2563	if (kiocb->ki_flags & IOCB_WRITE)
2564		kiocb_end_write(req);
2565	if (unlikely(res != req->result)) {
2566		if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
2567		    io_resubmit_prep(req))) {
2568			req_set_fail(req);
2569			req->flags |= REQ_F_DONT_REISSUE;
2570		}
2571	}
2572
2573	WRITE_ONCE(req->result, res);
2574	/* order with io_iopoll_complete() checking ->result */
2575	smp_wmb();
2576	WRITE_ONCE(req->iopoll_completed, 1);
2577}
2578
2579/*
2580 * After the iocb has been issued, it's safe to be found on the poll list.
2581 * Adding the kiocb to the list AFTER submission ensures that we don't
2582 * find it from an io_do_iopoll() thread before the issuer is done
2583 * accessing the kiocb cookie.
2584 */
2585static void io_iopoll_req_issued(struct io_kiocb *req)
2586{
2587	struct io_ring_ctx *ctx = req->ctx;
2588	const bool in_async = io_wq_current_is_worker();
2589
2590	/* workqueue context doesn't hold uring_lock, grab it now */
2591	if (unlikely(in_async))
2592		mutex_lock(&ctx->uring_lock);
2593
2594	/*
2595	 * Track whether we have multiple files in our lists. This will impact
2596	 * how we do polling later on: we don't spin if we're on potentially
2597	 * different devices.
2598	 */
2599	if (list_empty(&ctx->iopoll_list)) {
2600		ctx->poll_multi_queue = false;
2601	} else if (!ctx->poll_multi_queue) {
2602		struct io_kiocb *list_req;
2603		unsigned int queue_num0, queue_num1;
2604
2605		list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
2606						inflight_entry);
2607
2608		if (list_req->file != req->file) {
2609			ctx->poll_multi_queue = true;
2610		} else {
2611			queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
2612			queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
2613			if (queue_num0 != queue_num1)
2614				ctx->poll_multi_queue = true;
2615		}
2616	}
2617
2618	/*
2619	 * For fast devices, IO may have already completed. If it has, add
2620	 * it to the front so we find it first.
2621	 */
2622	if (READ_ONCE(req->iopoll_completed))
2623		list_add(&req->inflight_entry, &ctx->iopoll_list);
2624	else
2625		list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
2626
2627	if (unlikely(in_async)) {
2628		/*
2629		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
2630		 * in the sq thread task context or in an io worker task context.
2631		 * If the current task context is the sq thread, we don't need
2632		 * to check whether we should wake up the sq thread.
2633		 */
2634		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2635		    wq_has_sleeper(&ctx->sq_data->wait))
2636			wake_up(&ctx->sq_data->wait);
2637
2638		mutex_unlock(&ctx->uring_lock);
2639	}
2640}
2641
2642static bool io_bdev_nowait(struct block_device *bdev)
2643{
2644	return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
2645}
2646
2647/*
2648 * If we tracked the file through the SCM inflight mechanism, we could support
2649 * any file. For now, just ensure that anything potentially problematic is done
2650 * inline.
2651 */
2652static bool __io_file_supports_nowait(struct file *file, int rw)
2653{
2654	umode_t mode = file_inode(file)->i_mode;
2655
2656	if (S_ISBLK(mode)) {
2657		if (IS_ENABLED(CONFIG_BLOCK) &&
2658		    io_bdev_nowait(I_BDEV(file->f_mapping->host)))
2659			return true;
2660		return false;
2661	}
2662	if (S_ISSOCK(mode))
2663		return true;
2664	if (S_ISREG(mode)) {
2665		if (IS_ENABLED(CONFIG_BLOCK) &&
2666		    io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
2667		    file->f_op != &io_uring_fops)
2668			return true;
2669		return false;
2670	}
2671
2672	/* any ->read/write should understand O_NONBLOCK */
2673	if (file->f_flags & O_NONBLOCK)
2674		return true;
2675
2676	if (!(file->f_mode & FMODE_NOWAIT))
2677		return false;
2678
2679	if (rw == READ)
2680		return file->f_op->read_iter != NULL;
2681
2682	return file->f_op->write_iter != NULL;
2683}
2684
2685static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
2686{
2687	if (rw == READ && (req->flags & REQ_F_NOWAIT_READ))
2688		return true;
2689	else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE))
2690		return true;
2691
2692	return __io_file_supports_nowait(req->file, rw);
2693}
2694
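/*
 * Prepare a read/write request from the SQE: file position, rw flags,
 * ioprio, and the completion handler (polled or not). Fixed buffer opcodes
 * also get their rsrc node pinned here, with the buffer itself looked up
 * lazily at issue time.
 */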
2695static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2696{
2697	struct io_ring_ctx *ctx = req->ctx;
2698	struct kiocb *kiocb = &req->rw.kiocb;
2699	struct file *file = req->file;
2700	unsigned ioprio;
2701	int ret;
2702
2703	if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode))
2704		req->flags |= REQ_F_ISREG;
2705
2706	kiocb->ki_pos = READ_ONCE(sqe->off);
2707	if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
2708		req->flags |= REQ_F_CUR_POS;
2709		kiocb->ki_pos = file->f_pos;
2710	}
2711	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
2712	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2713	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2714	if (unlikely(ret))
2715		return ret;
2716
2717	/* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
2718	if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
2719		req->flags |= REQ_F_NOWAIT;
2720
2721	ioprio = READ_ONCE(sqe->ioprio);
2722	if (ioprio) {
2723		ret = ioprio_check_cap(ioprio);
2724		if (ret)
2725			return ret;
2726
2727		kiocb->ki_ioprio = ioprio;
2728	} else
2729		kiocb->ki_ioprio = get_current_ioprio();
2730
2731	if (ctx->flags & IORING_SETUP_IOPOLL) {
2732		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2733		    !kiocb->ki_filp->f_op->iopoll)
2734			return -EOPNOTSUPP;
2735
2736		kiocb->ki_flags |= IOCB_HIPRI;
2737		kiocb->ki_complete = io_complete_rw_iopoll;
2738		req->iopoll_completed = 0;
2739	} else {
2740		if (kiocb->ki_flags & IOCB_HIPRI)
2741			return -EINVAL;
2742		kiocb->ki_complete = io_complete_rw;
2743	}
2744
2745	if (req->opcode == IORING_OP_READ_FIXED ||
2746	    req->opcode == IORING_OP_WRITE_FIXED) {
2747		req->imu = NULL;
2748		io_req_set_rsrc_node(req);
2749	}
2750
2751	req->rw.addr = READ_ONCE(sqe->addr);
2752	req->rw.len = READ_ONCE(sqe->len);
2753	req->buf_index = READ_ONCE(sqe->buf_index);
2754	return 0;
2755}
2756
2757static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2758{
2759	switch (ret) {
2760	case -EIOCBQUEUED:
2761		break;
2762	case -ERESTARTSYS:
2763	case -ERESTARTNOINTR:
2764	case -ERESTARTNOHAND:
2765	case -ERESTART_RESTARTBLOCK:
2766		/*
2767		 * We can't just restart the syscall, since previously
2768		 * submitted sqes may already be in progress. Just fail this
2769		 * IO with EINTR.
2770		 */
2771		ret = -EINTR;
2772		fallthrough;
2773	default:
2774		kiocb->ki_complete(kiocb, ret, 0);
2775	}
2776}
2777
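/*
 * Finish a read/write kiocb: fold in bytes completed by a previous partial
 * attempt, update the file position for REQ_F_CUR_POS, and handle -EAGAIN
 * reissue or failure for requests flagged REQ_F_REISSUE.
 */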
2778static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
2779		       unsigned int issue_flags)
2780{
2781	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2782	struct io_async_rw *io = req->async_data;
2783	bool check_reissue = kiocb->ki_complete == io_complete_rw;
2784
2785	/* add previously done IO, if any */
2786	if (io && io->bytes_done > 0) {
2787		if (ret < 0)
2788			ret = io->bytes_done;
2789		else
2790			ret += io->bytes_done;
2791	}
2792
2793	if (req->flags & REQ_F_CUR_POS)
2794		req->file->f_pos = kiocb->ki_pos;
2795	if (ret >= 0 && check_reissue)
2796		__io_complete_rw(req, ret, 0, issue_flags);
2797	else
2798		io_rw_done(kiocb, ret);
2799
2800	if (check_reissue && (req->flags & REQ_F_REISSUE)) {
2801		req->flags &= ~REQ_F_REISSUE;
2802		if (io_resubmit_prep(req)) {
2803			io_req_task_queue_reissue(req);
2804		} else {
2805			int cflags = 0;
2806
2807			req_set_fail(req);
2808			if (req->flags & REQ_F_BUFFER_SELECTED)
2809				cflags = io_put_rw_kbuf(req);
2810			__io_req_complete(req, issue_flags, ret, cflags);
2811		}
2812	}
2813}
2814
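/*
 * Set up the iov_iter over a registered (fixed) buffer, checking that the
 * requested range lies inside the mapped region and skipping ahead to the
 * right bvec when the IO doesn't start at the beginning of the buffer.
 */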
2815static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
2816			     struct io_mapped_ubuf *imu)
2817{
2818	size_t len = req->rw.len;
2819	u64 buf_end, buf_addr = req->rw.addr;
2820	size_t offset;
2821
2822	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
2823		return -EFAULT;
2824	/* not inside the mapped region */
2825	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
2826		return -EFAULT;
2827
2828	/*
2829	 * May not be a start of buffer, set size appropriately
2830	 * and advance us to the beginning.
2831	 */
2832	offset = buf_addr - imu->ubuf;
2833	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
2834
2835	if (offset) {
2836		/*
2837		 * Don't use iov_iter_advance() here, as it's really slow for
2838		 * using the latter parts of a big fixed buffer - it iterates
2839		 * over each segment manually. We can cheat a bit here, because
2840		 * we know that:
2841		 *
2842		 * 1) it's a BVEC iter, we set it up
2843		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2844		 *    first and last bvec
2845		 *
2846		 * So just find our index, and adjust the iterator afterwards.
2847		 * If the offset is within the first bvec (or the whole first
2848		 * If the offset is within the first bvec (or the whole first
2849		 * bvec), just use iov_iter_advance(). This makes it easier
2850		 * be PAGE_SIZE aligned.
2851		 */
2852		const struct bio_vec *bvec = imu->bvec;
2853
2854		if (offset <= bvec->bv_len) {
2855			iov_iter_advance(iter, offset);
2856		} else {
2857			unsigned long seg_skip;
2858
2859			/* skip first vec */
2860			offset -= bvec->bv_len;
2861			seg_skip = 1 + (offset >> PAGE_SHIFT);
2862
2863			iter->bvec = bvec + seg_skip;
2864			iter->nr_segs -= seg_skip;
2865			iter->count -= bvec->bv_len + offset;
2866			iter->iov_offset = offset & ~PAGE_MASK;
2867		}
2868	}
2869
2870	return 0;
2871}
2872
2873static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
2874{
2875	struct io_ring_ctx *ctx = req->ctx;
2876	struct io_mapped_ubuf *imu = req->imu;
2877	u16 index, buf_index = req->buf_index;
2878
2879	if (likely(!imu)) {
2880		if (unlikely(buf_index >= ctx->nr_user_bufs))
2881			return -EFAULT;
2882		index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2883		imu = READ_ONCE(ctx->user_bufs[index]);
2884		req->imu = imu;
2885	}
2886	return __io_import_fixed(req, rw, iter, imu);
2887}
2888
2889static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2890{
2891	if (needs_lock)
2892		mutex_unlock(&ctx->uring_lock);
2893}
2894
2895static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2896{
2897	/*
2898	 * "Normal" inline submissions always hold the uring_lock, since we
2899	 * grab it from the system call. Same is true for the SQPOLL offload.
2900	 * The only exception is when we've detached the request and issue it
2901	 * from an async worker thread, grab the lock for that case.
2902	 */
2903	if (needs_lock)
2904		mutex_lock(&ctx->uring_lock);
2905}
2906
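/*
 * Select a buffer from the provided-buffer group 'bgid'. On success the
 * buffer is removed from the group and *len is clamped to its size; returns
 * ERR_PTR(-ENOBUFS) if no buffers are left in that group.
 */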
2907static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2908					  int bgid, struct io_buffer *kbuf,
2909					  bool needs_lock)
2910{
2911	struct io_buffer *head;
2912
2913	if (req->flags & REQ_F_BUFFER_SELECTED)
2914		return kbuf;
2915
2916	io_ring_submit_lock(req->ctx, needs_lock);
2917
2918	lockdep_assert_held(&req->ctx->uring_lock);
2919
2920	head = xa_load(&req->ctx->io_buffers, bgid);
2921	if (head) {
2922		if (!list_empty(&head->list)) {
2923			kbuf = list_last_entry(&head->list, struct io_buffer,
2924							list);
2925			list_del(&kbuf->list);
2926		} else {
2927			kbuf = head;
2928			xa_erase(&req->ctx->io_buffers, bgid);
2929		}
2930		if (*len > kbuf->len)
2931			*len = kbuf->len;
2932	} else {
2933		kbuf = ERR_PTR(-ENOBUFS);
2934	}
2935
2936	io_ring_submit_unlock(req->ctx, needs_lock);
2937
2938	return kbuf;
2939}
2940
2941static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2942					bool needs_lock)
2943{
2944	struct io_buffer *kbuf;
2945	u16 bgid;
2946
2947	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2948	bgid = req->buf_index;
2949	kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2950	if (IS_ERR(kbuf))
2951		return kbuf;
2952	req->rw.addr = (u64) (unsigned long) kbuf;
2953	req->flags |= REQ_F_BUFFER_SELECTED;
2954	return u64_to_user_ptr(kbuf->addr);
2955}
2956
2957#ifdef CONFIG_COMPAT
2958static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2959				bool needs_lock)
2960{
2961	struct compat_iovec __user *uiov;
2962	compat_ssize_t clen;
2963	void __user *buf;
2964	ssize_t len;
2965
2966	uiov = u64_to_user_ptr(req->rw.addr);
2967	if (!access_ok(uiov, sizeof(*uiov)))
2968		return -EFAULT;
2969	if (__get_user(clen, &uiov->iov_len))
2970		return -EFAULT;
2971	if (clen < 0)
2972		return -EINVAL;
2973
2974	len = clen;
2975	buf = io_rw_buffer_select(req, &len, needs_lock);
2976	if (IS_ERR(buf))
2977		return PTR_ERR(buf);
2978	iov[0].iov_base = buf;
2979	iov[0].iov_len = (compat_size_t) len;
2980	return 0;
2981}
2982#endif
2983
2984static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2985				      bool needs_lock)
2986{
2987	struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2988	void __user *buf;
2989	ssize_t len;
2990
2991	if (copy_from_user(iov, uiov, sizeof(*uiov)))
2992		return -EFAULT;
2993
2994	len = iov[0].iov_len;
2995	if (len < 0)
2996		return -EINVAL;
2997	buf = io_rw_buffer_select(req, &len, needs_lock);
2998	if (IS_ERR(buf))
2999		return PTR_ERR(buf);
3000	iov[0].iov_base = buf;
3001	iov[0].iov_len = len;
3002	return 0;
3003}
3004
3005static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3006				    bool needs_lock)
3007{
3008	if (req->flags & REQ_F_BUFFER_SELECTED) {
3009		struct io_buffer *kbuf;
3010
3011		kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3012		iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3013		iov[0].iov_len = kbuf->len;
3014		return 0;
3015	}
3016	if (req->rw.len != 1)
3017		return -EINVAL;
3018
3019#ifdef CONFIG_COMPAT
3020	if (req->ctx->compat)
3021		return io_compat_import(req, iov, needs_lock);
3022#endif
3023
3024	return __io_iov_buffer_select(req, iov, needs_lock);
3025}
3026
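/*
 * Build the iov_iter for a read/write request, covering fixed buffers,
 * provided buffers (REQ_F_BUFFER_SELECT), the single-range READ/WRITE
 * opcodes, and regular readv/writev style iovec imports.
 */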
3027static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3028			   struct iov_iter *iter, bool needs_lock)
3029{
3030	void __user *buf = u64_to_user_ptr(req->rw.addr);
3031	size_t sqe_len = req->rw.len;
3032	u8 opcode = req->opcode;
3033	ssize_t ret;
3034
3035	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
3036		*iovec = NULL;
3037		return io_import_fixed(req, rw, iter);
3038	}
3039
3040	/* buffer index only valid with fixed read/write, or buffer select */
3041	if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
3042		return -EINVAL;
3043
3044	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
3045		if (req->flags & REQ_F_BUFFER_SELECT) {
3046			buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
3047			if (IS_ERR(buf))
3048				return PTR_ERR(buf);
3049			req->rw.len = sqe_len;
3050		}
3051
3052		ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3053		*iovec = NULL;
3054		return ret;
3055	}
3056
3057	if (req->flags & REQ_F_BUFFER_SELECT) {
3058		ret = io_iov_buffer_select(req, *iovec, needs_lock);
3059		if (!ret)
3060			iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
3061		*iovec = NULL;
3062		return ret;
3063	}
3064
3065	return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3066			      req->ctx->compat);
3067}
3068
3069static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3070{
3071	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
3072}
3073
3074/*
3075 * For files that don't have ->read_iter() and ->write_iter(), handle them
3076 * by looping over ->read() or ->write() manually.
3077 */
3078static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
3079{
3080	struct kiocb *kiocb = &req->rw.kiocb;
3081	struct file *file = req->file;
3082	ssize_t ret = 0;
3083
3084	/*
3085	 * Don't support polled IO through this interface, and we can't
3086	 * support non-blocking either. For the latter, this just causes
3087	 * the kiocb to be handled from an async context.
3088	 */
3089	if (kiocb->ki_flags & IOCB_HIPRI)
3090		return -EOPNOTSUPP;
3091	if (kiocb->ki_flags & IOCB_NOWAIT)
3092		return -EAGAIN;
3093
3094	while (iov_iter_count(iter)) {
3095		struct iovec iovec;
3096		ssize_t nr;
3097
3098		if (!iov_iter_is_bvec(iter)) {
3099			iovec = iov_iter_iovec(iter);
3100		} else {
3101			iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3102			iovec.iov_len = req->rw.len;
3103		}
3104
3105		if (rw == READ) {
3106			nr = file->f_op->read(file, iovec.iov_base,
3107					      iovec.iov_len, io_kiocb_ppos(kiocb));
3108		} else {
3109			nr = file->f_op->write(file, iovec.iov_base,
3110					       iovec.iov_len, io_kiocb_ppos(kiocb));
3111		}
3112
3113		if (nr < 0) {
3114			if (!ret)
3115				ret = nr;
3116			break;
3117		}
3118		ret += nr;
3119		if (nr != iovec.iov_len)
3120			break;
3121		req->rw.len -= nr;
3122		req->rw.addr += nr;
3123		iov_iter_advance(iter, nr);
3124	}
3125
3126	return ret;
3127}
3128
3129static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3130			  const struct iovec *fast_iov, struct iov_iter *iter)
3131{
3132	struct io_async_rw *rw = req->async_data;
3133
3134	memcpy(&rw->iter, iter, sizeof(*iter));
3135	rw->free_iovec = iovec;
3136	rw->bytes_done = 0;
3137	/* can only be fixed buffers, no need to do anything */
3138	if (iov_iter_is_bvec(iter))
3139		return;
3140	if (!iovec) {
3141		unsigned iov_off = 0;
3142
3143		rw->iter.iov = rw->fast_iov;
3144		if (iter->iov != fast_iov) {
3145			iov_off = iter->iov - fast_iov;
3146			rw->iter.iov += iov_off;
3147		}
3148		if (rw->fast_iov != fast_iov)
3149			memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
3150			       sizeof(struct iovec) * iter->nr_segs);
3151	} else {
3152		req->flags |= REQ_F_NEED_CLEANUP;
3153	}
3154}
3155
3156static inline int io_alloc_async_data(struct io_kiocb *req)
3157{
3158	WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3159	req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3160	return req->async_data == NULL;
3161}
3162
3163static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3164			     const struct iovec *fast_iov,
3165			     struct iov_iter *iter, bool force)
3166{
3167	if (!force && !io_op_defs[req->opcode].needs_async_setup)
3168		return 0;
3169	if (!req->async_data) {
3170		if (io_alloc_async_data(req)) {
3171			kfree(iovec);
3172			return -ENOMEM;
3173		}
3174
3175		io_req_map_rw(req, iovec, fast_iov, iter);
3176	}
3177	return 0;
3178}
3179
3180static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
3181{
3182	struct io_async_rw *iorw = req->async_data;
3183	struct iovec *iov = iorw->fast_iov;
3184	int ret;
3185
3186	ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
3187	if (unlikely(ret < 0))
3188		return ret;
3189
3190	iorw->bytes_done = 0;
3191	iorw->free_iovec = iov;
3192	if (iov)
3193		req->flags |= REQ_F_NEED_CLEANUP;
3194	return 0;
3195}
3196
3197static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3198{
3199	if (unlikely(!(req->file->f_mode & FMODE_READ)))
3200		return -EBADF;
3201	return io_prep_rw(req, sqe);
3202}
3203
3204/*
3205 * This is our waitqueue callback handler, registered through lock_page_async()
3206 * when we initially tried to do the IO with the iocb and armed our waitqueue.
3207 * This gets called when the page is unlocked, and we generally expect that to
3208 * happen when the page IO is completed and the page is now uptodate. This will
3209 * queue a task_work based retry of the operation, attempting to copy the data
3210 * again. If the latter fails because the page was NOT uptodate, then we will
3211 * do a thread based blocking retry of the operation. That's the unexpected
3212 * slow path.
3213 */
3214static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3215			     int sync, void *arg)
3216{
3217	struct wait_page_queue *wpq;
3218	struct io_kiocb *req = wait->private;
3219	struct wait_page_key *key = arg;
3220
3221	wpq = container_of(wait, struct wait_page_queue, wait);
3222
3223	if (!wake_page_match(wpq, key))
3224		return 0;
3225
3226	req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
3227	list_del_init(&wait->entry);
3228	io_req_task_queue(req);
3229	return 1;
3230}
3231
3232/*
3233 * This controls whether a given IO request should be armed for async page
3234 * based retry. If we return false here, the request is handed to the async
3235 * worker threads for retry. If we're doing buffered reads on a regular file,
3236 * we prepare a private wait_page_queue entry and retry the operation. This
3237 * will either succeed because the page is now uptodate and unlocked, or it
3238 * will register a callback when the page is unlocked at IO completion. Through
3239 * that callback, io_uring uses task_work to setup a retry of the operation.
3240 * That retry will attempt the buffered read again. The retry will generally
3241 * succeed, or in rare cases where it fails, we then fall back to using the
3242 * async worker threads for a blocking retry.
3243 */
3244static bool io_rw_should_retry(struct io_kiocb *req)
3245{
3246	struct io_async_rw *rw = req->async_data;
3247	struct wait_page_queue *wait = &rw->wpq;
3248	struct kiocb *kiocb = &req->rw.kiocb;
3249
3250	/* never retry for NOWAIT, we just complete with -EAGAIN */
3251	if (req->flags & REQ_F_NOWAIT)
3252		return false;
3253
3254	/* Only for buffered IO */
3255	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
3256		return false;
3257
3258	/*
3259	 * just use poll if we can, and don't attempt if the fs doesn't
3260	 * support callback based unlocks
3261	 */
3262	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3263		return false;
3264
3265	wait->wait.func = io_async_buf_func;
3266	wait->wait.private = req;
3267	wait->wait.flags = 0;
3268	INIT_LIST_HEAD(&wait->wait.entry);
3269	kiocb->ki_flags |= IOCB_WAITQ;
3270	kiocb->ki_flags &= ~IOCB_NOWAIT;
3271	kiocb->ki_waitq = wait;
3272	return true;
3273}
3274
3275static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3276{
3277	if (req->file->f_op->read_iter)
3278		return call_read_iter(req->file, &req->rw.kiocb, iter);
3279	else if (req->file->f_op->read)
3280		return loop_rw_iter(READ, req, iter);
3281	else
3282		return -EINVAL;
3283}
3284
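/*
 * Issue a read. A nonblocking attempt that returns -EAGAIN is either punted
 * to io-wq or, for buffered reads on files supporting FMODE_BUF_RASYNC,
 * retried via a page-unlock callback using the persistent async iterator.
 */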
3285static int io_read(struct io_kiocb *req, unsigned int issue_flags)
3286{
3287	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
3288	struct kiocb *kiocb = &req->rw.kiocb;
3289	struct iov_iter __iter, *iter = &__iter;
3290	struct io_async_rw *rw = req->async_data;
3291	ssize_t io_size, ret, ret2;
3292	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3293
3294	if (rw) {
3295		iter = &rw->iter;
3296		iovec = NULL;
3297	} else {
3298		ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3299		if (ret < 0)
3300			return ret;
3301	}
3302	io_size = iov_iter_count(iter);
3303	req->result = io_size;
3304
3305	/* Ensure we clear previously set non-block flag */
3306	if (!force_nonblock)
3307		kiocb->ki_flags &= ~IOCB_NOWAIT;
3308	else
3309		kiocb->ki_flags |= IOCB_NOWAIT;
3310
3311	/* If the file doesn't support async, just async punt */
3312	if (force_nonblock && !io_file_supports_nowait(req, READ)) {
3313		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3314		return ret ?: -EAGAIN;
3315	}
3316
3317	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
3318	if (unlikely(ret)) {
3319		kfree(iovec);
3320		return ret;
3321	}
3322
3323	ret = io_iter_do_read(req, iter);
3324
3325	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
3326		req->flags &= ~REQ_F_REISSUE;
3327		/* IOPOLL retry should happen for io-wq threads */
3328		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
3329			goto done;
3330		/* no retry on NONBLOCK nor RWF_NOWAIT */
3331		if (req->flags & REQ_F_NOWAIT)
3332			goto done;
3333		/* some cases will consume bytes even on error returns */
3334		iov_iter_revert(iter, io_size - iov_iter_count(iter));
3335		ret = 0;
3336	} else if (ret == -EIOCBQUEUED) {
3337		goto out_free;
3338	} else if (ret <= 0 || ret == io_size || !force_nonblock ||
3339		   (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
3340		/* read all, failed, already did sync or don't want to retry */
3341		goto done;
3342	}
3343
3344	ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3345	if (ret2)
3346		return ret2;
3347
3348	iovec = NULL;
3349	rw = req->async_data;
3350	/* now use our persistent iterator, if we aren't already */
3351	iter = &rw->iter;
3352
3353	do {
3354		io_size -= ret;
3355		rw->bytes_done += ret;
3356		/* if we can retry, do so with the callbacks armed */
3357		if (!io_rw_should_retry(req)) {
3358			kiocb->ki_flags &= ~IOCB_WAITQ;
3359			return -EAGAIN;
3360		}
3361
3362		/*
3363		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3364		 * we get -EIOCBQUEUED, then we'll get a notification when the
3365		 * desired page gets unlocked. We can also get a partial read
3366		 * here, and if we do, then just retry at the new offset.
3367		 */
3368		ret = io_iter_do_read(req, iter);
3369		if (ret == -EIOCBQUEUED)
3370			return 0;
3371		/* we got some bytes, but not all. retry. */
3372		kiocb->ki_flags &= ~IOCB_WAITQ;
3373	} while (ret > 0 && ret < io_size);
3374done:
3375	kiocb_done(kiocb, ret, issue_flags);
3376out_free:
3377	/* it's faster to check here than to delegate to kfree */
3378	if (iovec)
3379		kfree(iovec);
3380	return 0;
3381}
3382
3383static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3384{
3385	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3386		return -EBADF;
3387	return io_prep_rw(req, sqe);
3388}
3389
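/*
 * Issue a write. Nonblocking attempts that can't proceed are punted via the
 * copy_iov path, which snapshots the iovec into async data first so the
 * io-wq retry sees a consistent iterator.
 */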
3390static int io_write(struct io_kiocb *req, unsigned int issue_flags)
3391{
3392	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
3393	struct kiocb *kiocb = &req->rw.kiocb;
3394	struct iov_iter __iter, *iter = &__iter;
3395	struct io_async_rw *rw = req->async_data;
3396	ssize_t ret, ret2, io_size;
3397	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3398
3399	if (rw) {
3400		iter = &rw->iter;
3401		iovec = NULL;
3402	} else {
3403		ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3404		if (ret < 0)
3405			return ret;
3406	}
3407	io_size = iov_iter_count(iter);
3408	req->result = io_size;
3409
3410	/* Ensure we clear previously set non-block flag */
3411	if (!force_nonblock)
3412		kiocb->ki_flags &= ~IOCB_NOWAIT;
3413	else
3414		kiocb->ki_flags |= IOCB_NOWAIT;
3415
3416	/* If the file doesn't support async, just async punt */
3417	if (force_nonblock && !io_file_supports_nowait(req, WRITE))
3418		goto copy_iov;
3419
3420	/* file path doesn't support NOWAIT for non-direct IO */
3421	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3422	    (req->flags & REQ_F_ISREG))
3423		goto copy_iov;
3424
3425	ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
3426	if (unlikely(ret))
3427		goto out_free;
3428
3429	/*
3430	 * Open-code file_start_write here to grab freeze protection,
3431	 * which will be released by another thread in
3432	 * io_complete_rw().  Fool lockdep by telling it the lock got
3433	 * released so that it doesn't complain about the held lock when
3434	 * we return to userspace.
3435	 */
3436	if (req->flags & REQ_F_ISREG) {
3437		sb_start_write(file_inode(req->file)->i_sb);
3438		__sb_writers_release(file_inode(req->file)->i_sb,
3439					SB_FREEZE_WRITE);
3440	}
3441	kiocb->ki_flags |= IOCB_WRITE;
3442
3443	if (req->file->f_op->write_iter)
3444		ret2 = call_write_iter(req->file, kiocb, iter);
3445	else if (req->file->f_op->write)
3446		ret2 = loop_rw_iter(WRITE, req, iter);
3447	else
3448		ret2 = -EINVAL;
3449
3450	if (req->flags & REQ_F_REISSUE) {
3451		req->flags &= ~REQ_F_REISSUE;
3452		ret2 = -EAGAIN;
3453	}
3454
3455	/*
3456	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3457	 * retry them without IOCB_NOWAIT.
3458	 */
3459	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3460		ret2 = -EAGAIN;
3461	/* no retry on NONBLOCK nor RWF_NOWAIT */
3462	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
3463		goto done;
3464	if (!force_nonblock || ret2 != -EAGAIN) {
3465		/* IOPOLL retry should happen for io-wq threads */
3466		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3467			goto copy_iov;
3468done:
3469		kiocb_done(kiocb, ret2, issue_flags);
3470	} else {
3471copy_iov:
3472		/* some cases will consume bytes even on error returns */
3473		iov_iter_revert(iter, io_size - iov_iter_count(iter));
3474		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
3475		return ret ?: -EAGAIN;
3476	}
3477out_free:
3478	/* it's reportedly faster than delegating the null check to kfree() */
3479	if (iovec)
3480		kfree(iovec);
3481	return ret;
3482}
3483
3484static int io_renameat_prep(struct io_kiocb *req,
3485			    const struct io_uring_sqe *sqe)
3486{
3487	struct io_rename *ren = &req->rename;
3488	const char __user *oldf, *newf;
3489
3490	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3491		return -EINVAL;
3492	if (sqe->ioprio || sqe->buf_index)
3493		return -EINVAL;
3494	if (unlikely(req->flags & REQ_F_FIXED_FILE))
3495		return -EBADF;
3496
3497	ren->old_dfd = READ_ONCE(sqe->fd);
3498	oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3499	newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3500	ren->new_dfd = READ_ONCE(sqe->len);
3501	ren->flags = READ_ONCE(sqe->rename_flags);
3502
3503	ren->oldpath = getname(oldf);
3504	if (IS_ERR(ren->oldpath))
3505		return PTR_ERR(ren->oldpath);
3506
3507	ren->newpath = getname(newf);
3508	if (IS_ERR(ren->newpath)) {
3509		putname(ren->oldpath);
3510		return PTR_ERR(ren->newpath);
3511	}
3512
3513	req->flags |= REQ_F_NEED_CLEANUP;
3514	return 0;
3515}
3516
3517static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
3518{
3519	struct io_rename *ren = &req->rename;
3520	int ret;
3521
3522	if (issue_flags & IO_URING_F_NONBLOCK)
3523		return -EAGAIN;
3524
3525	ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3526				ren->newpath, ren->flags);
3527
3528	req->flags &= ~REQ_F_NEED_CLEANUP;
3529	if (ret < 0)
3530		req_set_fail(req);
3531	io_req_complete(req, ret);
3532	return 0;
3533}
3534
3535static int io_unlinkat_prep(struct io_kiocb *req,
3536			    const struct io_uring_sqe *sqe)
3537{
3538	struct io_unlink *un = &req->unlink;
3539	const char __user *fname;
3540
3541	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3542		return -EINVAL;
3543	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
3544		return -EINVAL;
3545	if (unlikely(req->flags & REQ_F_FIXED_FILE))
3546		return -EBADF;
3547
3548	un->dfd = READ_ONCE(sqe->fd);
3549
3550	un->flags = READ_ONCE(sqe->unlink_flags);
3551	if (un->flags & ~AT_REMOVEDIR)
3552		return -EINVAL;
3553
3554	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3555	un->filename = getname(fname);
3556	if (IS_ERR(un->filename))
3557		return PTR_ERR(un->filename);
3558
3559	req->flags |= REQ_F_NEED_CLEANUP;
3560	return 0;
3561}
3562
3563static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
3564{
3565	struct io_unlink *un = &req->unlink;
3566	int ret;
3567
3568	if (issue_flags & IO_URING_F_NONBLOCK)
3569		return -EAGAIN;
3570
3571	if (un->flags & AT_REMOVEDIR)
3572		ret = do_rmdir(un->dfd, un->filename);
3573	else
3574		ret = do_unlinkat(un->dfd, un->filename);
3575
3576	req->flags &= ~REQ_F_NEED_CLEANUP;
3577	if (ret < 0)
3578		req_set_fail(req);
3579	io_req_complete(req, ret);
3580	return 0;
3581}
3582
3583static int io_shutdown_prep(struct io_kiocb *req,
3584			    const struct io_uring_sqe *sqe)
3585{
3586#if defined(CONFIG_NET)
3587	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3588		return -EINVAL;
3589	if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3590	    sqe->buf_index)
3591		return -EINVAL;
3592
3593	req->shutdown.how = READ_ONCE(sqe->len);
3594	return 0;
3595#else
3596	return -EOPNOTSUPP;
3597#endif
3598}
3599
3600static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
3601{
3602#if defined(CONFIG_NET)
3603	struct socket *sock;
3604	int ret;
3605
3606	if (issue_flags & IO_URING_F_NONBLOCK)
3607		return -EAGAIN;
3608
3609	sock = sock_from_file(req->file);
3610	if (unlikely(!sock))
3611		return -ENOTSOCK;
3612
3613	ret = __sys_shutdown_sock(sock, req->shutdown.how);
3614	if (ret < 0)
3615		req_set_fail(req);
3616	io_req_complete(req, ret);
3617	return 0;
3618#else
3619	return -EOPNOTSUPP;
3620#endif
3621}
3622
3623static int __io_splice_prep(struct io_kiocb *req,
3624			    const struct io_uring_sqe *sqe)
3625{
3626	struct io_splice *sp = &req->splice;
3627	unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
3628
3629	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3630		return -EINVAL;
3631
3632	sp->file_in = NULL;
3633	sp->len = READ_ONCE(sqe->len);
3634	sp->flags = READ_ONCE(sqe->splice_flags);
3635
3636	if (unlikely(sp->flags & ~valid_flags))
3637		return -EINVAL;
3638
3639	sp->file_in = io_file_get(req->ctx, req, READ_ONCE(sqe->splice_fd_in),
3640				  (sp->flags & SPLICE_F_FD_IN_FIXED));
3641	if (!sp->file_in)
3642		return -EBADF;
3643	req->flags |= REQ_F_NEED_CLEANUP;
3644	return 0;
3645}
3646
3647static int io_tee_prep(struct io_kiocb *req,
3648		       const struct io_uring_sqe *sqe)
3649{
3650	if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3651		return -EINVAL;
3652	return __io_splice_prep(req, sqe);
3653}
3654
3655static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
3656{
3657	struct io_splice *sp = &req->splice;
3658	struct file *in = sp->file_in;
3659	struct file *out = sp->file_out;
3660	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3661	long ret = 0;
3662
3663	if (issue_flags & IO_URING_F_NONBLOCK)
3664		return -EAGAIN;
3665	if (sp->len)
3666		ret = do_tee(in, out, sp->len, flags);
3667
3668	if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3669		io_put_file(in);
3670	req->flags &= ~REQ_F_NEED_CLEANUP;
3671
3672	if (ret != sp->len)
3673		req_set_fail(req);
3674	io_req_complete(req, ret);
3675	return 0;
3676}
3677
3678static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3679{
3680	struct io_splice *sp = &req->splice;
3681
3682	sp->off_in = READ_ONCE(sqe->splice_off_in);
3683	sp->off_out = READ_ONCE(sqe->off);
3684	return __io_splice_prep(req, sqe);
3685}
3686
3687static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
3688{
3689	struct io_splice *sp = &req->splice;
3690	struct file *in = sp->file_in;
3691	struct file *out = sp->file_out;
3692	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3693	loff_t *poff_in, *poff_out;
3694	long ret = 0;
3695
3696	if (issue_flags & IO_URING_F_NONBLOCK)
3697		return -EAGAIN;
3698
3699	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3700	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
3701
3702	if (sp->len)
3703		ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
3704
3705	if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3706		io_put_file(in);
3707	req->flags &= ~REQ_F_NEED_CLEANUP;
3708
3709	if (ret != sp->len)
3710		req_set_fail(req);
3711	io_req_complete(req, ret);
3712	return 0;
3713}
3714
3715/*
3716 * IORING_OP_NOP just posts a completion event, nothing else.
3717 */
3718static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
3719{
3720	struct io_ring_ctx *ctx = req->ctx;
3721
3722	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3723		return -EINVAL;
3724
3725	__io_req_complete(req, issue_flags, 0, 0);
3726	return 0;
3727}
3728
3729static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3730{
3731	struct io_ring_ctx *ctx = req->ctx;
3732
3733	if (!req->file)
3734		return -EBADF;
3735
3736	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3737		return -EINVAL;
3738	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
3739		return -EINVAL;
3740
3741	req->sync.flags = READ_ONCE(sqe->fsync_flags);
3742	if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3743		return -EINVAL;
3744
3745	req->sync.off = READ_ONCE(sqe->off);
3746	req->sync.len = READ_ONCE(sqe->len);
3747	return 0;
3748}
3749
3750static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
3751{
3752	loff_t end = req->sync.off + req->sync.len;
3753	int ret;
3754
3755	/* fsync always requires a blocking context */
3756	if (issue_flags & IO_URING_F_NONBLOCK)
3757		return -EAGAIN;
3758
3759	ret = vfs_fsync_range(req->file, req->sync.off,
3760				end > 0 ? end : LLONG_MAX,
3761				req->sync.flags & IORING_FSYNC_DATASYNC);
3762	if (ret < 0)
3763		req_set_fail(req);
3764	io_req_complete(req, ret);
3765	return 0;
3766}
3767
3768static int io_fallocate_prep(struct io_kiocb *req,
3769			     const struct io_uring_sqe *sqe)
3770{
3771	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3772		return -EINVAL;
3773	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3774		return -EINVAL;
3775
3776	req->sync.off = READ_ONCE(sqe->off);
3777	req->sync.len = READ_ONCE(sqe->addr);
3778	req->sync.mode = READ_ONCE(sqe->len);
3779	return 0;
3780}
3781
3782static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
3783{
3784	int ret;
3785
3786	/* fallocate always requires a blocking context */
3787	if (issue_flags & IO_URING_F_NONBLOCK)
3788		return -EAGAIN;
3789	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3790				req->sync.len);
3791	if (ret < 0)
3792		req_set_fail(req);
3793	io_req_complete(req, ret);
3794	return 0;
3795}
3796
3797static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3798{
3799	const char __user *fname;
3800	int ret;
3801
3802	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3803		return -EINVAL;
3804	if (unlikely(sqe->ioprio || sqe->buf_index))
3805		return -EINVAL;
3806	if (unlikely(req->flags & REQ_F_FIXED_FILE))
3807		return -EBADF;
3808
3809	/* open.how should already be initialised */
3810	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
3811		req->open.how.flags |= O_LARGEFILE;
3812
3813	req->open.dfd = READ_ONCE(sqe->fd);
3814	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3815	req->open.filename = getname(fname);
3816	if (IS_ERR(req->open.filename)) {
3817		ret = PTR_ERR(req->open.filename);
3818		req->open.filename = NULL;
3819		return ret;
3820	}
3821	req->open.nofile = rlimit(RLIMIT_NOFILE);
3822	req->flags |= REQ_F_NEED_CLEANUP;
3823	return 0;
3824}
3825
3826static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3827{
3828	u64 mode = READ_ONCE(sqe->len);
3829	u64 flags = READ_ONCE(sqe->open_flags);
3830
3831	req->open.how = build_open_how(flags, mode);
3832	return __io_openat_prep(req, sqe);
3833}
3834
3835static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3836{
3837	struct open_how __user *how;
3838	size_t len;
3839	int ret;
3840
3841	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3842	len = READ_ONCE(sqe->len);
3843	if (len < OPEN_HOW_SIZE_VER0)
3844		return -EINVAL;
3845
3846	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3847					len);
3848	if (ret)
3849		return ret;
3850
3851	return __io_openat_prep(req, sqe);
3852}
3853
3854static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
3855{
3856	struct open_flags op;
3857	struct file *file;
3858	bool nonblock_set;
3859	bool resolve_nonblock;
3860	int ret;
3861
3862	ret = build_open_flags(&req->open.how, &op);
3863	if (ret)
3864		goto err;
3865	nonblock_set = op.open_flag & O_NONBLOCK;
3866	resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
3867	if (issue_flags & IO_URING_F_NONBLOCK) {
3868		/*
3869		 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
3870		 * it'll always return -EAGAIN
3871		 */
3872		if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
3873			return -EAGAIN;
3874		op.lookup_flags |= LOOKUP_CACHED;
3875		op.open_flag |= O_NONBLOCK;
3876	}
3877
3878	ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
3879	if (ret < 0)
3880		goto err;
3881
3882	file = do_filp_open(req->open.dfd, req->open.filename, &op);
3883	if (IS_ERR(file)) {
3884		/*
3885		 * We could hang on to this 'fd' on retrying, but it seems like
3886		 * a marginal gain for something that is now known to be a slower
3887		 * path. So just put it, and we'll get a new one when we retry.
3888		 */
3889		put_unused_fd(ret);
3890
3891		ret = PTR_ERR(file);
3892		/* only retry if RESOLVE_CACHED wasn't already set by application */
3893		if (ret == -EAGAIN &&
3894		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
3895			return -EAGAIN;
3896		goto err;
3897	}
3898
3899	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
3900		file->f_flags &= ~O_NONBLOCK;
3901	fsnotify_open(file);
3902	fd_install(ret, file);
3903err:
3904	putname(req->open.filename);
3905	req->flags &= ~REQ_F_NEED_CLEANUP;
3906	if (ret < 0)
3907		req_set_fail(req);
3908	__io_req_complete(req, issue_flags, ret, 0);
3909	return 0;
3910}
3911
3912static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
3913{
3914	return io_openat2(req, issue_flags);
3915}
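/*
 * Editor's note: a minimal userspace sketch (not part of this file) of how
 * the fields consumed by io_openat2_prep() above are typically filled in.
 * It assumes an initialised liburing ring and the liburing helpers named
 * below; treat it as illustrative, not authoritative.
 *
 *	struct open_how how = {
 *		.flags   = O_RDONLY,
 *		.resolve = RESOLVE_CACHED,	// only allow a cached (non-blocking) lookup
 *	};
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// dfd -> sqe->fd, path -> sqe->addr, &how -> sqe->addr2, sizeof(how) -> sqe->len
 *	io_uring_prep_openat2(sqe, AT_FDCWD, "data.txt", &how);
 *	io_uring_submit(&ring);
 *
 * With RESOLVE_CACHED set by the application, a lookup that cannot be served
 * from the dcache completes with -EAGAIN instead of being retried from a
 * blocking context (see resolve_nonblock in io_openat2() above).
 */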
3916
3917static int io_remove_buffers_prep(struct io_kiocb *req,
3918				  const struct io_uring_sqe *sqe)
3919{
3920	struct io_provide_buf *p = &req->pbuf;
3921	u64 tmp;
3922
3923	if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3924		return -EINVAL;
3925
3926	tmp = READ_ONCE(sqe->fd);
3927	if (!tmp || tmp > USHRT_MAX)
3928		return -EINVAL;
3929
3930	memset(p, 0, sizeof(*p));
3931	p->nbufs = tmp;
3932	p->bgid = READ_ONCE(sqe->buf_group);
3933	return 0;
3934}
3935
3936static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3937			       int bgid, unsigned nbufs)
3938{
3939	unsigned i = 0;
3940
3941	/* shouldn't happen */
3942	if (!nbufs)
3943		return 0;
3944
3945	/* the head kbuf is the list itself */
3946	while (!list_empty(&buf->list)) {
3947		struct io_buffer *nxt;
3948
3949		nxt = list_first_entry(&buf->list, struct io_buffer, list);
3950		list_del(&nxt->list);
3951		kfree(nxt);
3952		if (++i == nbufs)
3953			return i;
3954	}
3955	i++;
3956	kfree(buf);
3957	xa_erase(&ctx->io_buffers, bgid);
3958
3959	return i;
3960}
3961
3962static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
3963{
3964	struct io_provide_buf *p = &req->pbuf;
3965	struct io_ring_ctx *ctx = req->ctx;
3966	struct io_buffer *head;
3967	int ret = 0;
3968	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3969
3970	io_ring_submit_lock(ctx, !force_nonblock);
3971
3972	lockdep_assert_held(&ctx->uring_lock);
3973
3974	ret = -ENOENT;
3975	head = xa_load(&ctx->io_buffers, p->bgid);
3976	if (head)
3977		ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
3978	if (ret < 0)
3979		req_set_fail(req);
3980
3981	/* complete before unlock, IOPOLL may need the lock */
3982	__io_req_complete(req, issue_flags, ret, 0);
3983	io_ring_submit_unlock(ctx, !force_nonblock);
3984	return 0;
3985}
3986
3987static int io_provide_buffers_prep(struct io_kiocb *req,
3988				   const struct io_uring_sqe *sqe)
3989{
3990	unsigned long size, tmp_check;
3991	struct io_provide_buf *p = &req->pbuf;
3992	u64 tmp;
3993
3994	if (sqe->ioprio || sqe->rw_flags)
3995		return -EINVAL;
3996
3997	tmp = READ_ONCE(sqe->fd);
3998	if (!tmp || tmp > USHRT_MAX)
3999		return -E2BIG;
4000	p->nbufs = tmp;
4001	p->addr = READ_ONCE(sqe->addr);
4002	p->len = READ_ONCE(sqe->len);
4003
4004	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
4005				&size))
4006		return -EOVERFLOW;
4007	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
4008		return -EOVERFLOW;
4009
4010	size = (unsigned long)p->len * p->nbufs;
4011	if (!access_ok(u64_to_user_ptr(p->addr), size))
4012		return -EFAULT;
4013
4014	p->bgid = READ_ONCE(sqe->buf_group);
4015	tmp = READ_ONCE(sqe->off);
4016	if (tmp > USHRT_MAX)
4017		return -E2BIG;
4018	p->bid = tmp;
4019	return 0;
4020}
4021
4022static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4023{
4024	struct io_buffer *buf;
4025	u64 addr = pbuf->addr;
4026	int i, bid = pbuf->bid;
4027
4028	for (i = 0; i < pbuf->nbufs; i++) {
4029		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
4030		if (!buf)
4031			break;
4032
4033		buf->addr = addr;
4034		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
4035		buf->bid = bid;
4036		addr += pbuf->len;
4037		bid++;
4038		if (!*head) {
4039			INIT_LIST_HEAD(&buf->list);
4040			*head = buf;
4041		} else {
4042			list_add_tail(&buf->list, &(*head)->list);
4043		}
4044	}
4045
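	/* report how many buffers were actually added; only fail if none were */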
4046	return i ? i : -ENOMEM;
4047}
4048
4049static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
4050{
4051	struct io_provide_buf *p = &req->pbuf;
4052	struct io_ring_ctx *ctx = req->ctx;
4053	struct io_buffer *head, *list;
4054	int ret = 0;
4055	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4056
4057	io_ring_submit_lock(ctx, !force_nonblock);
4058
4059	lockdep_assert_held(&ctx->uring_lock);
4060
4061	list = head = xa_load(&ctx->io_buffers, p->bgid);
4062
4063	ret = io_add_buffers(p, &head);
4064	if (ret >= 0 && !list) {
4065		ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
4066		if (ret < 0)
4067			__io_remove_buffers(ctx, head, p->bgid, -1U);
4068	}
4069	if (ret < 0)
4070		req_set_fail(req);
4071	/* complete before unlock, IOPOLL may need the lock */
4072	__io_req_complete(req, issue_flags, ret, 0);
4073	io_ring_submit_unlock(ctx, !force_nonblock);
4074	return 0;
4075}
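/*
 * Editor's note: a minimal userspace sketch (not part of this file) of the
 * provide/select buffer flow handled above. It assumes an initialised
 * liburing ring and the liburing helpers named below.
 *
 *	char *pool = malloc(8 * 4096);
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// 8 buffers of 4096 bytes, group id 1, buffer ids starting at 0
 *	// (nbufs -> sqe->fd, addr/len -> sqe->addr/len, group -> sqe->buf_group, bid -> sqe->off)
 *	io_uring_prep_provide_buffers(sqe, pool, 4096, 8, 1, 0);
 *	io_uring_submit(&ring);
 *
 *	// later: let the kernel pick a buffer from group 1 for a receive
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv(sqe, sockfd, NULL, 4096, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 1;
 *	io_uring_submit(&ring);
 *
 * The id of the buffer that was consumed is reported back in
 * cqe->flags >> IORING_CQE_BUFFER_SHIFT (see io_put_recv_kbuf() below).
 */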
4076
4077static int io_epoll_ctl_prep(struct io_kiocb *req,
4078			     const struct io_uring_sqe *sqe)
4079{
4080#if defined(CONFIG_EPOLL)
4081	if (sqe->ioprio || sqe->buf_index)
4082		return -EINVAL;
4083	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4084		return -EINVAL;
4085
4086	req->epoll.epfd = READ_ONCE(sqe->fd);
4087	req->epoll.op = READ_ONCE(sqe->len);
4088	req->epoll.fd = READ_ONCE(sqe->off);
4089
4090	if (ep_op_has_event(req->epoll.op)) {
4091		struct epoll_event __user *ev;
4092
4093		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4094		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4095			return -EFAULT;
4096	}
4097
4098	return 0;
4099#else
4100	return -EOPNOTSUPP;
4101#endif
4102}
4103
4104static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
4105{
4106#if defined(CONFIG_EPOLL)
4107	struct io_epoll *ie = &req->epoll;
4108	int ret;
4109	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4110
4111	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4112	if (force_nonblock && ret == -EAGAIN)
4113		return -EAGAIN;
4114
4115	if (ret < 0)
4116		req_set_fail(req);
4117	__io_req_complete(req, issue_flags, ret, 0);
4118	return 0;
4119#else
4120	return -EOPNOTSUPP;
4121#endif
4122}
4123
4124static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4125{
4126#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4127	if (sqe->ioprio || sqe->buf_index || sqe->off)
4128		return -EINVAL;
4129	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4130		return -EINVAL;
4131
4132	req->madvise.addr = READ_ONCE(sqe->addr);
4133	req->madvise.len = READ_ONCE(sqe->len);
4134	req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4135	return 0;
4136#else
4137	return -EOPNOTSUPP;
4138#endif
4139}
4140
4141static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
4142{
4143#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4144	struct io_madvise *ma = &req->madvise;
4145	int ret;
4146
4147	if (issue_flags & IO_URING_F_NONBLOCK)
4148		return -EAGAIN;
4149
4150	ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
4151	if (ret < 0)
4152		req_set_fail(req);
4153	io_req_complete(req, ret);
4154	return 0;
4155#else
4156	return -EOPNOTSUPP;
4157#endif
4158}
4159
4160static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4161{
4162	if (sqe->ioprio || sqe->buf_index || sqe->addr)
4163		return -EINVAL;
4164	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4165		return -EINVAL;
4166
4167	req->fadvise.offset = READ_ONCE(sqe->off);
4168	req->fadvise.len = READ_ONCE(sqe->len);
4169	req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4170	return 0;
4171}
4172
4173static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
4174{
4175	struct io_fadvise *fa = &req->fadvise;
4176	int ret;
4177
4178	if (issue_flags & IO_URING_F_NONBLOCK) {
4179		switch (fa->advice) {
4180		case POSIX_FADV_NORMAL:
4181		case POSIX_FADV_RANDOM:
4182		case POSIX_FADV_SEQUENTIAL:
4183			break;
4184		default:
4185			return -EAGAIN;
4186		}
4187	}
4188
4189	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4190	if (ret < 0)
4191		req_set_fail(req);
4192	__io_req_complete(req, issue_flags, ret, 0);
4193	return 0;
4194}
4195
4196static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4197{
4198	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4199		return -EINVAL;
4200	if (sqe->ioprio || sqe->buf_index)
4201		return -EINVAL;
4202	if (req->flags & REQ_F_FIXED_FILE)
4203		return -EBADF;
4204
4205	req->statx.dfd = READ_ONCE(sqe->fd);
4206	req->statx.mask = READ_ONCE(sqe->len);
4207	req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
4208	req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4209	req->statx.flags = READ_ONCE(sqe->statx_flags);
4210
4211	return 0;
4212}
4213
4214static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
4215{
4216	struct io_statx *ctx = &req->statx;
4217	int ret;
4218
4219	if (issue_flags & IO_URING_F_NONBLOCK)
4220		return -EAGAIN;
4221
4222	ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4223		       ctx->buffer);
4224
4225	if (ret < 0)
4226		req_set_fail(req);
4227	io_req_complete(req, ret);
4228	return 0;
4229}
4230
4231static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4232{
4233	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4234		return -EINVAL;
4235	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4236	    sqe->rw_flags || sqe->buf_index)
4237		return -EINVAL;
4238	if (req->flags & REQ_F_FIXED_FILE)
4239		return -EBADF;
4240
4241	req->close.fd = READ_ONCE(sqe->fd);
4242	return 0;
4243}
4244
4245static int io_close(struct io_kiocb *req, unsigned int issue_flags)
4246{
4247	struct files_struct *files = current->files;
4248	struct io_close *close = &req->close;
4249	struct fdtable *fdt;
4250	struct file *file = NULL;
4251	int ret = -EBADF;
4252
4253	spin_lock(&files->file_lock);
4254	fdt = files_fdtable(files);
4255	if (close->fd >= fdt->max_fds) {
4256		spin_unlock(&files->file_lock);
4257		goto err;
4258	}
4259	file = fdt->fd[close->fd];
4260	if (!file || file->f_op == &io_uring_fops) {
4261		spin_unlock(&files->file_lock);
4262		file = NULL;
4263		goto err;
4264	}
4265
4266	/* if the file has a flush method, be safe and punt to async */
4267	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
4268		spin_unlock(&files->file_lock);
4269		return -EAGAIN;
4270	}
4271
4272	ret = __close_fd_get_file(close->fd, &file);
4273	spin_unlock(&files->file_lock);
4274	if (ret < 0) {
4275		if (ret == -ENOENT)
4276			ret = -EBADF;
4277		goto err;
4278	}
4279
4280	/* No ->flush() or already async, safely close from here */
4281	ret = filp_close(file, current->files);
4282err:
4283	if (ret < 0)
4284		req_set_fail(req);
4285	if (file)
4286		fput(file);
4287	__io_req_complete(req, issue_flags, ret, 0);
4288	return 0;
4289}
4290
4291static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4292{
4293	struct io_ring_ctx *ctx = req->ctx;
4294
4295	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4296		return -EINVAL;
4297	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4298		return -EINVAL;
4299
4300	req->sync.off = READ_ONCE(sqe->off);
4301	req->sync.len = READ_ONCE(sqe->len);
4302	req->sync.flags = READ_ONCE(sqe->sync_range_flags);
4303	return 0;
4304}
4305
4306static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
4307{
4308	int ret;
4309
4310	/* sync_file_range always requires a blocking context */
4311	if (issue_flags & IO_URING_F_NONBLOCK)
4312		return -EAGAIN;
4313
4314	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
4315				req->sync.flags);
4316	if (ret < 0)
4317		req_set_fail(req);
4318	io_req_complete(req, ret);
4319	return 0;
4320}
4321
4322#if defined(CONFIG_NET)
4323static int io_setup_async_msg(struct io_kiocb *req,
4324			      struct io_async_msghdr *kmsg)
4325{
4326	struct io_async_msghdr *async_msg = req->async_data;
4327
4328	if (async_msg)
4329		return -EAGAIN;
4330	if (io_alloc_async_data(req)) {
4331		kfree(kmsg->free_iov);
4332		return -ENOMEM;
4333	}
4334	async_msg = req->async_data;
4335	req->flags |= REQ_F_NEED_CLEANUP;
4336	memcpy(async_msg, kmsg, sizeof(*kmsg));
4337	async_msg->msg.msg_name = &async_msg->addr;
4338	/* if we're using fast_iov, set it to the new one */
4339	if (!async_msg->free_iov)
4340		async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4341
4342	return -EAGAIN;
4343}
4344
4345static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4346			       struct io_async_msghdr *iomsg)
4347{
4348	iomsg->msg.msg_name = &iomsg->addr;
4349	iomsg->free_iov = iomsg->fast_iov;
4350	return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
4351				   req->sr_msg.msg_flags, &iomsg->free_iov);
4352}
4353
4354static int io_sendmsg_prep_async(struct io_kiocb *req)
4355{
4356	int ret;
4357
4358	ret = io_sendmsg_copy_hdr(req, req->async_data);
4359	if (!ret)
4360		req->flags |= REQ_F_NEED_CLEANUP;
4361	return ret;
4362}
4363
4364static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4365{
4366	struct io_sr_msg *sr = &req->sr_msg;
4367
4368	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4369		return -EINVAL;
4370
4371	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4372	sr->len = READ_ONCE(sqe->len);
4373	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4374	if (sr->msg_flags & MSG_DONTWAIT)
4375		req->flags |= REQ_F_NOWAIT;
4376
4377#ifdef CONFIG_COMPAT
4378	if (req->ctx->compat)
4379		sr->msg_flags |= MSG_CMSG_COMPAT;
4380#endif
4381	return 0;
4382}
4383
4384static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
4385{
4386	struct io_async_msghdr iomsg, *kmsg;
4387	struct socket *sock;
4388	unsigned flags;
4389	int min_ret = 0;
4390	int ret;
4391
4392	sock = sock_from_file(req->file);
4393	if (unlikely(!sock))
4394		return -ENOTSOCK;
4395
4396	kmsg = req->async_data;
4397	if (!kmsg) {
4398		ret = io_sendmsg_copy_hdr(req, &iomsg);
4399		if (ret)
4400			return ret;
4401		kmsg = &iomsg;
4402	}
4403
4404	flags = req->sr_msg.msg_flags;
4405	if (issue_flags & IO_URING_F_NONBLOCK)
4406		flags |= MSG_DONTWAIT;
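	/* with MSG_WAITALL, anything short of the full message is treated as a failure below */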
4407	if (flags & MSG_WAITALL)
4408		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4409
4410	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
4411	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
4412		return io_setup_async_msg(req, kmsg);
4413	if (ret == -ERESTARTSYS)
4414		ret = -EINTR;
4415
4416	/* fast path, check for non-NULL to avoid function call */
4417	if (kmsg->free_iov)
4418		kfree(kmsg->free_iov);
4419	req->flags &= ~REQ_F_NEED_CLEANUP;
4420	if (ret < min_ret)
4421		req_set_fail(req);
4422	__io_req_complete(req, issue_flags, ret, 0);
4423	return 0;
4424}
4425
4426static int io_send(struct io_kiocb *req, unsigned int issue_flags)
4427{
4428	struct io_sr_msg *sr = &req->sr_msg;
4429	struct msghdr msg;
4430	struct iovec iov;
4431	struct socket *sock;
4432	unsigned flags;
4433	int min_ret = 0;
4434	int ret;
4435
4436	sock = sock_from_file(req->file);
4437	if (unlikely(!sock))
4438		return -ENOTSOCK;
4439
4440	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4441	if (unlikely(ret))
4442		return ret;
4443
4444	msg.msg_name = NULL;
4445	msg.msg_control = NULL;
4446	msg.msg_controllen = 0;
4447	msg.msg_namelen = 0;
4448
4449	flags = req->sr_msg.msg_flags;
4450	if (issue_flags & IO_URING_F_NONBLOCK)
4451		flags |= MSG_DONTWAIT;
4452	if (flags & MSG_WAITALL)
4453		min_ret = iov_iter_count(&msg.msg_iter);
4454
4455	msg.msg_flags = flags;
4456	ret = sock_sendmsg(sock, &msg);
4457	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
4458		return -EAGAIN;
4459	if (ret == -ERESTARTSYS)
4460		ret = -EINTR;
4461
4462	if (ret < min_ret)
4463		req_set_fail(req);
4464	__io_req_complete(req, issue_flags, ret, 0);
4465	return 0;
4466}
4467
4468static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4469				 struct io_async_msghdr *iomsg)
4470{
4471	struct io_sr_msg *sr = &req->sr_msg;
4472	struct iovec __user *uiov;
4473	size_t iov_len;
4474	int ret;
4475
4476	ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4477					&iomsg->uaddr, &uiov, &iov_len);
4478	if (ret)
4479		return ret;
4480
4481	if (req->flags & REQ_F_BUFFER_SELECT) {
4482		if (iov_len > 1)
4483			return -EINVAL;
4484		if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
4485			return -EFAULT;
4486		sr->len = iomsg->fast_iov[0].iov_len;
4487		iomsg->free_iov = NULL;
4488	} else {
4489		iomsg->free_iov = iomsg->fast_iov;
4490		ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
4491				     &iomsg->free_iov, &iomsg->msg.msg_iter,
4492				     false);
4493		if (ret > 0)
4494			ret = 0;
4495	}
4496
4497	return ret;
4498}
4499
4500#ifdef CONFIG_COMPAT
4501static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
4502					struct io_async_msghdr *iomsg)
4503{
4504	struct io_sr_msg *sr = &req->sr_msg;
4505	struct compat_iovec __user *uiov;
4506	compat_uptr_t ptr;
4507	compat_size_t len;
4508	int ret;
4509
4510	ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
4511				  &ptr, &len);
4512	if (ret)
4513		return ret;
4514
4515	uiov = compat_ptr(ptr);
4516	if (req->flags & REQ_F_BUFFER_SELECT) {
4517		compat_ssize_t clen;
4518
4519		if (len > 1)
4520			return -EINVAL;
4521		if (!access_ok(uiov, sizeof(*uiov)))
4522			return -EFAULT;
4523		if (__get_user(clen, &uiov->iov_len))
4524			return -EFAULT;
4525		if (clen < 0)
4526			return -EINVAL;
4527		sr->len = clen;
4528		iomsg->free_iov = NULL;
4529	} else {
4530		iomsg->free_iov = iomsg->fast_iov;
4531		ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
4532				   UIO_FASTIOV, &iomsg->free_iov,
4533				   &iomsg->msg.msg_iter, true);
4534		if (ret < 0)
4535			return ret;
4536	}
4537
4538	return 0;
4539}
4540#endif
4541
4542static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4543			       struct io_async_msghdr *iomsg)
4544{
4545	iomsg->msg.msg_name = &iomsg->addr;
4546
4547#ifdef CONFIG_COMPAT
4548	if (req->ctx->compat)
4549		return __io_compat_recvmsg_copy_hdr(req, iomsg);
4550#endif
4551
4552	return __io_recvmsg_copy_hdr(req, iomsg);
4553}
4554
4555static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
4556					       bool needs_lock)
4557{
4558	struct io_sr_msg *sr = &req->sr_msg;
4559	struct io_buffer *kbuf;
4560
4561	kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4562	if (IS_ERR(kbuf))
4563		return kbuf;
4564
4565	sr->kbuf = kbuf;
4566	req->flags |= REQ_F_BUFFER_SELECTED;
4567	return kbuf;
4568}
4569
4570static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4571{
4572	return io_put_kbuf(req, req->sr_msg.kbuf);
4573}
4574
4575static int io_recvmsg_prep_async(struct io_kiocb *req)
4576{
4577	int ret;
4578
4579	ret = io_recvmsg_copy_hdr(req, req->async_data);
4580	if (!ret)
4581		req->flags |= REQ_F_NEED_CLEANUP;
4582	return ret;
4583}
4584
4585static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4586{
4587	struct io_sr_msg *sr = &req->sr_msg;
4588
4589	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4590		return -EINVAL;
4591
4592	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4593	sr->len = READ_ONCE(sqe->len);
4594	sr->bgid = READ_ONCE(sqe->buf_group);
4595	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4596	if (sr->msg_flags & MSG_DONTWAIT)
4597		req->flags |= REQ_F_NOWAIT;
4598
4599#ifdef CONFIG_COMPAT
4600	if (req->ctx->compat)
4601		sr->msg_flags |= MSG_CMSG_COMPAT;
4602#endif
4603	return 0;
4604}
4605
4606static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
4607{
4608	struct io_async_msghdr iomsg, *kmsg;
4609	struct socket *sock;
4610	struct io_buffer *kbuf;
4611	unsigned flags;
4612	int min_ret = 0;
4613	int ret, cflags = 0;
4614	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4615
4616	sock = sock_from_file(req->file);
4617	if (unlikely(!sock))
4618		return -ENOTSOCK;
4619
4620	kmsg = req->async_data;
4621	if (!kmsg) {
4622		ret = io_recvmsg_copy_hdr(req, &iomsg);
4623		if (ret)
4624			return ret;
4625		kmsg = &iomsg;
4626	}
4627
4628	if (req->flags & REQ_F_BUFFER_SELECT) {
4629		kbuf = io_recv_buffer_select(req, !force_nonblock);
4630		if (IS_ERR(kbuf))
4631			return PTR_ERR(kbuf);
4632		kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
4633		kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4634		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
4635				1, req->sr_msg.len);
4636	}
4637
4638	flags = req->sr_msg.msg_flags;
4639	if (force_nonblock)
4640		flags |= MSG_DONTWAIT;
4641	if (flags & MSG_WAITALL)
4642		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4643
4644	ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4645					kmsg->uaddr, flags);
4646	if (force_nonblock && ret == -EAGAIN)
4647		return io_setup_async_msg(req, kmsg);
4648	if (ret == -ERESTARTSYS)
4649		ret = -EINTR;
4650
4651	if (req->flags & REQ_F_BUFFER_SELECTED)
4652		cflags = io_put_recv_kbuf(req);
4653	/* fast path, check for non-NULL to avoid function call */
4654	if (kmsg->free_iov)
4655		kfree(kmsg->free_iov);
4656	req->flags &= ~REQ_F_NEED_CLEANUP;
4657	if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
4658		req_set_fail(req);
4659	__io_req_complete(req, issue_flags, ret, cflags);
4660	return 0;
4661}
4662
4663static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
4664{
4665	struct io_buffer *kbuf;
4666	struct io_sr_msg *sr = &req->sr_msg;
4667	struct msghdr msg;
4668	void __user *buf = sr->buf;
4669	struct socket *sock;
4670	struct iovec iov;
4671	unsigned flags;
4672	int min_ret = 0;
4673	int ret, cflags = 0;
4674	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4675
4676	sock = sock_from_file(req->file);
4677	if (unlikely(!sock))
4678		return -ENOTSOCK;
4679
4680	if (req->flags & REQ_F_BUFFER_SELECT) {
4681		kbuf = io_recv_buffer_select(req, !force_nonblock);
4682		if (IS_ERR(kbuf))
4683			return PTR_ERR(kbuf);
4684		buf = u64_to_user_ptr(kbuf->addr);
4685	}
4686
4687	ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
4688	if (unlikely(ret))
4689		goto out_free;
4690
4691	msg.msg_name = NULL;
4692	msg.msg_control = NULL;
4693	msg.msg_controllen = 0;
4694	msg.msg_namelen = 0;
4695	msg.msg_iocb = NULL;
4696	msg.msg_flags = 0;
4697
4698	flags = req->sr_msg.msg_flags;
4699	if (force_nonblock)
4700		flags |= MSG_DONTWAIT;
4701	if (flags & MSG_WAITALL)
4702		min_ret = iov_iter_count(&msg.msg_iter);
4703
4704	ret = sock_recvmsg(sock, &msg, flags);
4705	if (force_nonblock && ret == -EAGAIN)
4706		return -EAGAIN;
4707	if (ret == -ERESTARTSYS)
4708		ret = -EINTR;
4709out_free:
4710	if (req->flags & REQ_F_BUFFER_SELECTED)
4711		cflags = io_put_recv_kbuf(req);
4712	if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
4713		req_set_fail(req);
4714	__io_req_complete(req, issue_flags, ret, cflags);
4715	return 0;
4716}
4717
4718static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4719{
4720	struct io_accept *accept = &req->accept;
4721
4722	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4723		return -EINVAL;
4724	if (sqe->ioprio || sqe->len || sqe->buf_index)
4725		return -EINVAL;
4726
4727	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4728	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4729	accept->flags = READ_ONCE(sqe->accept_flags);
4730	accept->nofile = rlimit(RLIMIT_NOFILE);
4731	return 0;
4732}
4733
4734static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
4735{
4736	struct io_accept *accept = &req->accept;
4737	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4738	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
4739	int ret;
4740
4741	if (req->file->f_flags & O_NONBLOCK)
4742		req->flags |= REQ_F_NOWAIT;
4743
4744	ret = __sys_accept4_file(req->file, file_flags, accept->addr,
4745					accept->addr_len, accept->flags,
4746					accept->nofile);
4747	if (ret == -EAGAIN && force_nonblock)
4748		return -EAGAIN;
4749	if (ret < 0) {
4750		if (ret == -ERESTARTSYS)
4751			ret = -EINTR;
4752		req_set_fail(req);
4753	}
4754	__io_req_complete(req, issue_flags, ret, 0);
4755	return 0;
4756}
4757
4758static int io_connect_prep_async(struct io_kiocb *req)
4759{
4760	struct io_async_connect *io = req->async_data;
4761	struct io_connect *conn = &req->connect;
4762
4763	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
4764}
4765
4766static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4767{
4768	struct io_connect *conn = &req->connect;
4769
4770	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4771		return -EINVAL;
4772	if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4773		return -EINVAL;
4774
4775	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4776	conn->addr_len = READ_ONCE(sqe->addr2);
4777	return 0;
4778}
4779
4780static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
4781{
4782	struct io_async_connect __io, *io;
4783	unsigned file_flags;
4784	int ret;
4785	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4786
4787	if (req->async_data) {
4788		io = req->async_data;
4789	} else {
4790		ret = move_addr_to_kernel(req->connect.addr,
4791						req->connect.addr_len,
4792						&__io.address);
4793		if (ret)
4794			goto out;
4795		io = &__io;
4796	}
4797
4798	file_flags = force_nonblock ? O_NONBLOCK : 0;
4799
4800	ret = __sys_connect_file(req->file, &io->address,
4801					req->connect.addr_len, file_flags);
4802	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
4803		if (req->async_data)
4804			return -EAGAIN;
4805		if (io_alloc_async_data(req)) {
4806			ret = -ENOMEM;
4807			goto out;
4808		}
4809		memcpy(req->async_data, &__io, sizeof(__io));
4810		return -EAGAIN;
4811	}
4812	if (ret == -ERESTARTSYS)
4813		ret = -EINTR;
4814out:
4815	if (ret < 0)
4816		req_set_fail(req);
4817	__io_req_complete(req, issue_flags, ret, 0);
4818	return 0;
4819}
4820#else /* !CONFIG_NET */
4821#define IO_NETOP_FN(op)							\
4822static int io_##op(struct io_kiocb *req, unsigned int issue_flags)	\
4823{									\
4824	return -EOPNOTSUPP;						\
4825}
4826
4827#define IO_NETOP_PREP(op)						\
4828IO_NETOP_FN(op)								\
4829static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
4830{									\
4831	return -EOPNOTSUPP;						\
4832}									\
4833
4834#define IO_NETOP_PREP_ASYNC(op)						\
4835IO_NETOP_PREP(op)							\
4836static int io_##op##_prep_async(struct io_kiocb *req)			\
4837{									\
4838	return -EOPNOTSUPP;						\
4839}
4840
4841IO_NETOP_PREP_ASYNC(sendmsg);
4842IO_NETOP_PREP_ASYNC(recvmsg);
4843IO_NETOP_PREP_ASYNC(connect);
4844IO_NETOP_PREP(accept);
4845IO_NETOP_FN(send);
4846IO_NETOP_FN(recv);
4847#endif /* CONFIG_NET */
4848
4849struct io_poll_table {
4850	struct poll_table_struct pt;
4851	struct io_kiocb *req;
4852	int nr_entries;
4853	int error;
4854};
4855
4856static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4857			   __poll_t mask, io_req_tw_func_t func)
4858{
4859	/* for instances that support it, check for an event match first */
4860	if (mask && !(mask & poll->events))
4861		return 0;
4862
4863	trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4864
4865	list_del_init(&poll->wait.entry);
4866
4867	req->result = mask;
4868	req->io_task_work.func = func;
4869
4870	/*
4871	 * If this fails, then the task is exiting. When a task exits, the
4872	 * work gets canceled, so just cancel this request as well instead
4873	 * of executing it. We can't safely execute it anyway, as we may not
4874	 * have the state needed for it.
4875	 */
4876	io_req_task_work_add(req);
4877	return 1;
4878}
4879
4880static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4881	__acquires(&req->ctx->completion_lock)
4882{
4883	struct io_ring_ctx *ctx = req->ctx;
4884
4885	if (unlikely(req->task->flags & PF_EXITING))
4886		WRITE_ONCE(poll->canceled, true);
4887
4888	if (!req->result && !READ_ONCE(poll->canceled)) {
4889		struct poll_table_struct pt = { ._key = poll->events };
4890
4891		req->result = vfs_poll(req->file, &pt) & poll->events;
4892	}
4893
4894	spin_lock(&ctx->completion_lock);
4895	if (!req->result && !READ_ONCE(poll->canceled)) {
4896		add_wait_queue(poll->head, &poll->wait);
4897		return true;
4898	}
4899
4900	return false;
4901}
4902
4903static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
4904{
4905	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
4906	if (req->opcode == IORING_OP_POLL_ADD)
4907		return req->async_data;
4908	return req->apoll->double_poll;
4909}
4910
4911static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4912{
4913	if (req->opcode == IORING_OP_POLL_ADD)
4914		return &req->poll;
4915	return &req->apoll->poll;
4916}
4917
4918static void io_poll_remove_double(struct io_kiocb *req)
4919	__must_hold(&req->ctx->completion_lock)
4920{
4921	struct io_poll_iocb *poll = io_poll_get_double(req);
4922
4923	lockdep_assert_held(&req->ctx->completion_lock);
4924
4925	if (poll && poll->head) {
4926		struct wait_queue_head *head = poll->head;
4927
4928		spin_lock_irq(&head->lock);
4929		list_del_init(&poll->wait.entry);
4930		if (poll->wait.private)
4931			req_ref_put(req);
4932		poll->head = NULL;
4933		spin_unlock_irq(&head->lock);
4934	}
4935}
4936
4937static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
4938	__must_hold(&req->ctx->completion_lock)
4939{
4940	struct io_ring_ctx *ctx = req->ctx;
4941	unsigned flags = IORING_CQE_F_MORE;
4942	int error;
4943
4944	if (READ_ONCE(req->poll.canceled)) {
4945		error = -ECANCELED;
4946		req->poll.events |= EPOLLONESHOT;
4947	} else {
4948		error = mangle_poll(mask);
4949	}
4950	if (req->poll.events & EPOLLONESHOT)
4951		flags = 0;
4952	if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
4953		req->poll.done = true;
4954		flags = 0;
4955	}
4956	if (flags & IORING_CQE_F_MORE)
4957		ctx->cq_extra++;
4958
4959	io_commit_cqring(ctx);
4960	return !(flags & IORING_CQE_F_MORE);
4961}
4962
4963static void io_poll_task_func(struct io_kiocb *req)
4964{
4965	struct io_ring_ctx *ctx = req->ctx;
4966	struct io_kiocb *nxt;
4967
4968	if (io_poll_rewait(req, &req->poll)) {
4969		spin_unlock(&ctx->completion_lock);
4970	} else {
4971		bool done;
4972
4973		done = io_poll_complete(req, req->result);
4974		if (done) {
4975			io_poll_remove_double(req);
4976			hash_del(&req->hash_node);
4977		} else {
4978			req->result = 0;
4979			add_wait_queue(req->poll.head, &req->poll.wait);
4980		}
4981		spin_unlock(&ctx->completion_lock);
4982		io_cqring_ev_posted(ctx);
4983
4984		if (done) {
4985			nxt = io_put_req_find_next(req);
4986			if (nxt)
4987				io_req_task_submit(nxt);
4988		}
4989	}
4990}
4991
4992static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4993			       int sync, void *key)
4994{
4995	struct io_kiocb *req = wait->private;
4996	struct io_poll_iocb *poll = io_poll_get_single(req);
4997	__poll_t mask = key_to_poll(key);
4998	unsigned long flags;
4999
5000	/* for instances that support it, check for an event match first */
5001	if (mask && !(mask & poll->events))
5002		return 0;
5003	if (!(poll->events & EPOLLONESHOT))
5004		return poll->wait.func(&poll->wait, mode, sync, key);
5005
5006	list_del_init(&wait->entry);
5007
5008	if (poll->head) {
5009		bool done;
5010
5011		spin_lock_irqsave(&poll->head->lock, flags);
5012		done = list_empty(&poll->wait.entry);
5013		if (!done)
5014			list_del_init(&poll->wait.entry);
5015		/* make sure double remove sees this as being gone */
5016		wait->private = NULL;
5017		spin_unlock_irqrestore(&poll->head->lock, flags);
5018		if (!done) {
5019			/* use wait func handler, so it matches the rq type */
5020			poll->wait.func(&poll->wait, mode, sync, key);
5021		}
5022	}
5023	req_ref_put(req);
5024	return 1;
5025}
5026
5027static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5028			      wait_queue_func_t wake_func)
5029{
5030	poll->head = NULL;
5031	poll->done = false;
5032	poll->canceled = false;
5033#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
5034	/* mask in events that we always want/need */
5035	poll->events = events | IO_POLL_UNMASK;
5036	INIT_LIST_HEAD(&poll->wait.entry);
5037	init_waitqueue_func_entry(&poll->wait, wake_func);
5038}
5039
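/*
 * Helper for the poll_table queue callbacks below (io_async_queue_proc and
 * io_poll_queue_proc): add this request to a waitqueue handed out by the
 * file's ->poll() method. The first waitqueue uses the io_poll_iocb passed
 * in by the caller; a second one gets a dynamically allocated io_poll_iocb
 * stored through *poll_ptr (->async_data for IORING_OP_POLL_ADD,
 * apoll->double_poll for internal poll). A third waitqueue is rejected
 * with -EINVAL.
 */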
5040static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
5041			    struct wait_queue_head *head,
5042			    struct io_poll_iocb **poll_ptr)
5043{
5044	struct io_kiocb *req = pt->req;
5045
5046	/*
5047	 * The file being polled uses multiple waitqueues for poll handling
5048	 * (e.g. one for read, one for write). Set up a separate io_poll_iocb
5049	 * if this happens.
5050	 */
5051	if (unlikely(pt->nr_entries)) {
5052		struct io_poll_iocb *poll_one = poll;
5053
5054		/* already have a 2nd entry, fail a third attempt */
5055		if (*poll_ptr) {
5056			pt->error = -EINVAL;
5057			return;
5058		}
5059		/*
5060		 * Can't handle multishot for double wait for now, turn it
5061		 * into one-shot mode.
5062		 */
5063		if (!(poll_one->events & EPOLLONESHOT))
5064			poll_one->events |= EPOLLONESHOT;
5065		/* double add on the same waitqueue head, ignore */
5066		if (poll_one->head == head)
5067			return;
5068		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5069		if (!poll) {
5070			pt->error = -ENOMEM;
5071			return;
5072		}
5073		io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
5074		req_ref_get(req);
5075		poll->wait.private = req;
5076		*poll_ptr = poll;
5077	}
5078
5079	pt->nr_entries++;
5080	poll->head = head;
5081
5082	if (poll->events & EPOLLEXCLUSIVE)
5083		add_wait_queue_exclusive(head, &poll->wait);
5084	else
5085		add_wait_queue(head, &poll->wait);
5086}
5087
5088static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5089			       struct poll_table_struct *p)
5090{
5091	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5092	struct async_poll *apoll = pt->req->apoll;
5093
5094	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
5095}
5096
5097static void io_async_task_func(struct io_kiocb *req)
5098{
5099	struct async_poll *apoll = req->apoll;
5100	struct io_ring_ctx *ctx = req->ctx;
5101
5102	trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
5103
5104	if (io_poll_rewait(req, &apoll->poll)) {
5105		spin_unlock(&ctx->completion_lock);
5106		return;
5107	}
5108
5109	hash_del(&req->hash_node);
5110	io_poll_remove_double(req);
5111	spin_unlock(&ctx->completion_lock);
5112
5113	if (!READ_ONCE(apoll->poll.canceled))
5114		io_req_task_submit(req);
5115	else
5116		io_req_complete_failed(req, -ECANCELED);
5117}
5118
5119static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5120			void *key)
5121{
5122	struct io_kiocb *req = wait->private;
5123	struct io_poll_iocb *poll = &req->apoll->poll;
5124
5125	trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5126					key_to_poll(key));
5127
5128	return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5129}
5130
5131static void io_poll_req_insert(struct io_kiocb *req)
5132{
5133	struct io_ring_ctx *ctx = req->ctx;
5134	struct hlist_head *list;
5135
5136	list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5137	hlist_add_head(&req->hash_node, list);
5138}
5139
5140static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5141				      struct io_poll_iocb *poll,
5142				      struct io_poll_table *ipt, __poll_t mask,
5143				      wait_queue_func_t wake_func)
5144	__acquires(&ctx->completion_lock)
5145{
5146	struct io_ring_ctx *ctx = req->ctx;
5147	bool cancel = false;
5148
5149	INIT_HLIST_NODE(&req->hash_node);
5150	io_init_poll_iocb(poll, mask, wake_func);
5151	poll->file = req->file;
5152	poll->wait.private = req;
5153
5154	ipt->pt._key = mask;
5155	ipt->req = req;
5156	ipt->error = 0;
5157	ipt->nr_entries = 0;
5158
5159	mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5160	if (unlikely(!ipt->nr_entries) && !ipt->error)
5161		ipt->error = -EINVAL;
5162
5163	spin_lock(&ctx->completion_lock);
5164	if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
5165		io_poll_remove_double(req);
5166	if (likely(poll->head)) {
5167		spin_lock_irq(&poll->head->lock);
5168		if (unlikely(list_empty(&poll->wait.entry))) {
5169			if (ipt->error)
5170				cancel = true;
5171			ipt->error = 0;
5172			mask = 0;
5173		}
5174		if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error)
5175			list_del_init(&poll->wait.entry);
5176		else if (cancel)
5177			WRITE_ONCE(poll->canceled, true);
5178		else if (!poll->done) /* actually waiting for an event */
5179			io_poll_req_insert(req);
5180		spin_unlock_irq(&poll->head->lock);
5181	}
5182
5183	return mask;
5184}
5185
5186enum {
5187	IO_APOLL_OK,		/* poll armed, request is retried when it triggers */
5188	IO_APOLL_ABORTED,	/* couldn't arm poll, caller must punt to async work */
5189	IO_APOLL_READY		/* file is already ready, issue the request again */
5190};
5191
5192static int io_arm_poll_handler(struct io_kiocb *req)
5193{
5194	const struct io_op_def *def = &io_op_defs[req->opcode];
5195	struct io_ring_ctx *ctx = req->ctx;
5196	struct async_poll *apoll;
5197	struct io_poll_table ipt;
5198	__poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI;
5199	int rw;
5200
5201	if (!req->file || !file_can_poll(req->file))
5202		return IO_APOLL_ABORTED;
5203	if (req->flags & REQ_F_POLLED)
5204		return IO_APOLL_ABORTED;
5205	if (!def->pollin && !def->pollout)
5206		return IO_APOLL_ABORTED;
5207
5208	if (def->pollin) {
5209		rw = READ;
5210		mask |= POLLIN | POLLRDNORM;
5211
5212		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5213		if ((req->opcode == IORING_OP_RECVMSG) &&
5214		    (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5215			mask &= ~POLLIN;
5216	} else {
5217		rw = WRITE;
5218		mask |= POLLOUT | POLLWRNORM;
5219	}
5220
5221	/* if we can't try nonblocking, there's no point in arming a poll handler */
5222	if (!io_file_supports_nowait(req, rw))
5223		return IO_APOLL_ABORTED;
5224
5225	apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5226	if (unlikely(!apoll))
5227		return IO_APOLL_ABORTED;
5228	apoll->double_poll = NULL;
5229	req->apoll = apoll;
5230	req->flags |= REQ_F_POLLED;
5231	ipt.pt._qproc = io_async_queue_proc;
5232	io_req_set_refcount(req);
5233
5234	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5235					io_async_wake);
5236	spin_unlock(&ctx->completion_lock);
5237	if (ret || ipt.error)
5238		return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
5239
5240	trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
5241				mask, apoll->poll.events);
5242	return IO_APOLL_OK;
5243}
5244
5245static bool __io_poll_remove_one(struct io_kiocb *req,
5246				 struct io_poll_iocb *poll, bool do_cancel)
5247	__must_hold(&req->ctx->completion_lock)
5248{
5249	bool do_complete = false;
5250
5251	if (!poll->head)
5252		return false;
5253	spin_lock_irq(&poll->head->lock);
5254	if (do_cancel)
5255		WRITE_ONCE(poll->canceled, true);
5256	if (!list_empty(&poll->wait.entry)) {
5257		list_del_init(&poll->wait.entry);
5258		do_complete = true;
5259	}
5260	spin_unlock_irq(&poll->head->lock);
5261	hash_del(&req->hash_node);
5262	return do_complete;
5263}
5264
5265static bool io_poll_remove_one(struct io_kiocb *req)
5266	__must_hold(&req->ctx->completion_lock)
5267{
5268	bool do_complete;
5269
5270	io_poll_remove_double(req);
5271	do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
5272
5273	if (do_complete) {
5274		io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
5275		io_commit_cqring(req->ctx);
5276		req_set_fail(req);
5277		io_put_req_deferred(req);
5278	}
5279	return do_complete;
5280}
5281
5282/*
5283 * Returns true if we found and killed one or more poll requests
5284 */
5285static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
5286			       bool cancel_all)
5287{
5288	struct hlist_node *tmp;
5289	struct io_kiocb *req;
5290	int posted = 0, i;
5291
5292	spin_lock(&ctx->completion_lock);
5293	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5294		struct hlist_head *list;
5295
5296		list = &ctx->cancel_hash[i];
5297		hlist_for_each_entry_safe(req, tmp, list, hash_node) {
5298			if (io_match_task(req, tsk, cancel_all))
5299				posted += io_poll_remove_one(req);
5300		}
5301	}
5302	spin_unlock(&ctx->completion_lock);
5303
5304	if (posted)
5305		io_cqring_ev_posted(ctx);
5306
5307	return posted != 0;
5308}
5309
5310static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
5311				     bool poll_only)
5312	__must_hold(&ctx->completion_lock)
5313{
5314	struct hlist_head *list;
5315	struct io_kiocb *req;
5316
5317	list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5318	hlist_for_each_entry(req, list, hash_node) {
5319		if (sqe_addr != req->user_data)
5320			continue;
5321		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
5322			continue;
5323		return req;
5324	}
5325	return NULL;
5326}
5327
5328static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
5329			  bool poll_only)
5330	__must_hold(&ctx->completion_lock)
5331{
5332	struct io_kiocb *req;
5333
5334	req = io_poll_find(ctx, sqe_addr, poll_only);
5335	if (!req)
5336		return -ENOENT;
5337	if (io_poll_remove_one(req))
5338		return 0;
5339
5340	return -EALREADY;
5341}
5342
5343static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
5344				     unsigned int flags)
5345{
5346	u32 events;
5347
5348	events = READ_ONCE(sqe->poll32_events);
5349#ifdef __BIG_ENDIAN
5350	events = swahw32(events);
5351#endif
5352	if (!(flags & IORING_POLL_ADD_MULTI))
5353		events |= EPOLLONESHOT;
5354	return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
5355}
5356
5357static int io_poll_update_prep(struct io_kiocb *req,
5358			       const struct io_uring_sqe *sqe)
5359{
5360	struct io_poll_update *upd = &req->poll_update;
5361	u32 flags;
5362
5363	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5364		return -EINVAL;
5365	if (sqe->ioprio || sqe->buf_index)
5366		return -EINVAL;
5367	flags = READ_ONCE(sqe->len);
5368	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
5369		      IORING_POLL_ADD_MULTI))
5370		return -EINVAL;
5371	/* meaningless without update */
5372	if (flags == IORING_POLL_ADD_MULTI)
5373		return -EINVAL;
5374
5375	upd->old_user_data = READ_ONCE(sqe->addr);
5376	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
5377	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
5378
5379	upd->new_user_data = READ_ONCE(sqe->off);
5380	if (!upd->update_user_data && upd->new_user_data)
5381		return -EINVAL;
5382	if (upd->update_events)
5383		upd->events = io_poll_parse_events(sqe, flags);
5384	else if (sqe->poll32_events)
5385		return -EINVAL;
5386
5387	return 0;
5388}
5389
5390static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5391			void *key)
5392{
5393	struct io_kiocb *req = wait->private;
5394	struct io_poll_iocb *poll = &req->poll;
5395
5396	return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
5397}
5398
5399static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5400			       struct poll_table_struct *p)
5401{
5402	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5403
5404	__io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
5405}
5406
5407static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5408{
5409	struct io_poll_iocb *poll = &req->poll;
5410	u32 flags;
5411
5412	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5413		return -EINVAL;
5414	if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
5415		return -EINVAL;
5416	flags = READ_ONCE(sqe->len);
5417	if (flags & ~IORING_POLL_ADD_MULTI)
5418		return -EINVAL;
5419
5420	io_req_set_refcount(req);
5421	poll->events = io_poll_parse_events(sqe, flags);
5422	return 0;
5423}
5424
5425static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
5426{
5427	struct io_poll_iocb *poll = &req->poll;
5428	struct io_ring_ctx *ctx = req->ctx;
5429	struct io_poll_table ipt;
5430	__poll_t mask;
5431
5432	ipt.pt._qproc = io_poll_queue_proc;
5433
5434	mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5435					io_poll_wake);
5436
5437	if (mask) { /* no async, we'd stolen it */
5438		ipt.error = 0;
5439		io_poll_complete(req, mask);
5440	}
5441	spin_unlock(&ctx->completion_lock);
5442
5443	if (mask) {
5444		io_cqring_ev_posted(ctx);
5445		if (poll->events & EPOLLONESHOT)
5446			io_put_req(req);
5447	}
5448	return ipt.error;
5449}
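/*
 * Editor's note: a minimal userspace sketch (not part of this file) of a
 * multishot poll request as parsed by io_poll_add_prep() above. It assumes
 * an initialised liburing ring and the liburing helper named below.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_add(sqe, sockfd, POLLIN);
 *	sqe->len = IORING_POLL_ADD_MULTI;	// poll flags live in sqe->len
 *	io_uring_submit(&ring);
 *
 * Each readiness event posts a CQE. As long as IORING_CQE_F_MORE is set in
 * cqe->flags the poll request remains armed; once it is clear, the request
 * has been terminated (see io_poll_complete() above).
 */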
5450
5451static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
5452{
5453	struct io_ring_ctx *ctx = req->ctx;
5454	struct io_kiocb *preq;
5455	bool completing;
5456	int ret;
5457
5458	spin_lock(&ctx->completion_lock);
5459	preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
5460	if (!preq) {
5461		ret = -ENOENT;
5462		goto err;
5463	}
5464
5465	if (!req->poll_update.update_events && !req->poll_update.update_user_data) {
5466		completing = true;
5467		ret = io_poll_remove_one(preq) ? 0 : -EALREADY;
5468		goto err;
5469	}
5470
5471	/*
5472	 * Don't allow racy completion with singleshot, as we cannot safely
5473	 * update those. For multishot, if we're racing with completion, just
5474	 * let completion re-add it.
5475	 */
5476	completing = !__io_poll_remove_one(preq, &preq->poll, false);
5477	if (completing && (preq->poll.events & EPOLLONESHOT)) {
5478		ret = -EALREADY;
5479		goto err;
5480	}
5481	/* we now have a detached poll request. reissue. */
5482	ret = 0;
5483err:
5484	if (ret < 0) {
5485		spin_unlock(&ctx->completion_lock);
5486		req_set_fail(req);
5487		io_req_complete(req, ret);
5488		return 0;
5489	}
5490	/* only replace the event mask bits, keep the behavior flags */
5491	if (req->poll_update.update_events) {
5492		preq->poll.events &= ~0xffff;
5493		preq->poll.events |= req->poll_update.events & 0xffff;
5494		preq->poll.events |= IO_POLL_UNMASK;
5495	}
5496	if (req->poll_update.update_user_data)
5497		preq->user_data = req->poll_update.new_user_data;
5498	spin_unlock(&ctx->completion_lock);
5499
5500	/* complete update request, we're done with it */
5501	io_req_complete(req, ret);
5502
5503	if (!completing) {
5504		ret = io_poll_add(preq, issue_flags);
5505		if (ret < 0) {
5506			req_set_fail(preq);
5507			io_req_complete(preq, ret);
5508		}
5509	}
5510	return 0;
5511}
5512
5513static void io_req_task_timeout(struct io_kiocb *req)
5514{
5515	struct io_ring_ctx *ctx = req->ctx;
5516
5517	spin_lock(&ctx->completion_lock);
5518	io_cqring_fill_event(ctx, req->user_data, -ETIME, 0);
5519	io_commit_cqring(ctx);
5520	spin_unlock(&ctx->completion_lock);
5521
5522	io_cqring_ev_posted(ctx);
5523	req_set_fail(req);
5524	io_put_req(req);
5525}
5526
5527static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5528{
5529	struct io_timeout_data *data = container_of(timer,
5530						struct io_timeout_data, timer);
5531	struct io_kiocb *req = data->req;
5532	struct io_ring_ctx *ctx = req->ctx;
5533	unsigned long flags;
5534
5535	spin_lock_irqsave(&ctx->timeout_lock, flags);
5536	list_del_init(&req->timeout.list);
5537	atomic_set(&req->ctx->cq_timeouts,
5538		atomic_read(&req->ctx->cq_timeouts) + 1);
5539	spin_unlock_irqrestore(&ctx->timeout_lock, flags);
5540
5541	req->io_task_work.func = io_req_task_timeout;
5542	io_req_task_work_add(req);
5543	return HRTIMER_NORESTART;
5544}
5545
5546static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5547					   __u64 user_data)
5548	__must_hold(&ctx->timeout_lock)
5549{
5550	struct io_timeout_data *io;
5551	struct io_kiocb *req;
5552	bool found = false;
5553
5554	list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
5555		found = user_data == req->user_data;
5556		if (found)
5557			break;
5558	}
5559	if (!found)
5560		return ERR_PTR(-ENOENT);
5561
5562	io = req->async_data;
5563	if (hrtimer_try_to_cancel(&io->timer) == -1)
5564		return ERR_PTR(-EALREADY);
5565	list_del_init(&req->timeout.list);
5566	return req;
5567}
5568
5569static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5570	__must_hold(&ctx->timeout_lock)
5571{
5572	struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5573
5574	if (IS_ERR(req))
5575		return PTR_ERR(req);
5576
5577	req_set_fail(req);
5578	io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
5579	io_put_req_deferred(req);
5580	return 0;
5581}
5582
5583static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5584			     struct timespec64 *ts, enum hrtimer_mode mode)
5585	__must_hold(&ctx->timeout_lock)
5586{
5587	struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5588	struct io_timeout_data *data;
5589
5590	if (IS_ERR(req))
5591		return PTR_ERR(req);
5592
5593	req->timeout.off = 0; /* noseq */
5594	data = req->async_data;
5595	list_add_tail(&req->timeout.list, &ctx->timeout_list);
5596	hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5597	data->timer.function = io_timeout_fn;
5598	hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5599	return 0;
5600}
5601
5602static int io_timeout_remove_prep(struct io_kiocb *req,
5603				  const struct io_uring_sqe *sqe)
5604{
5605	struct io_timeout_rem *tr = &req->timeout_rem;
5606
5607	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5608		return -EINVAL;
5609	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5610		return -EINVAL;
5611	if (sqe->ioprio || sqe->buf_index || sqe->len)
5612		return -EINVAL;
5613
5614	tr->addr = READ_ONCE(sqe->addr);
5615	tr->flags = READ_ONCE(sqe->timeout_flags);
5616	if (tr->flags & IORING_TIMEOUT_UPDATE) {
5617		if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5618			return -EINVAL;
5619		if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5620			return -EFAULT;
5621	} else if (tr->flags) {
5622		/* timeout removal doesn't support flags */
5623		return -EINVAL;
5624	}
5625
5626	return 0;
5627}
5628
5629static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5630{
5631	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5632					    : HRTIMER_MODE_REL;
5633}
5634
5635/*
5636 * Remove or update an existing timeout command
5637 */
5638static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
5639{
5640	struct io_timeout_rem *tr = &req->timeout_rem;
5641	struct io_ring_ctx *ctx = req->ctx;
5642	int ret;
5643
5644	spin_lock_irq(&ctx->timeout_lock);
5645	if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
5646		ret = io_timeout_cancel(ctx, tr->addr);
5647	else
5648		ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5649					io_translate_timeout_mode(tr->flags));
5650	spin_unlock_irq(&ctx->timeout_lock);
5651
5652	spin_lock(&ctx->completion_lock);
5653	io_cqring_fill_event(ctx, req->user_data, ret, 0);
5654	io_commit_cqring(ctx);
5655	spin_unlock(&ctx->completion_lock);
5656	io_cqring_ev_posted(ctx);
5657	if (ret < 0)
5658		req_set_fail(req);
5659	io_put_req(req);
5660	return 0;
5661}
5662
5663static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5664			   bool is_timeout_link)
5665{
5666	struct io_timeout_data *data;
5667	unsigned flags;
5668	u32 off = READ_ONCE(sqe->off);
5669
5670	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5671		return -EINVAL;
5672	if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
5673		return -EINVAL;
5674	if (off && is_timeout_link)
5675		return -EINVAL;
5676	flags = READ_ONCE(sqe->timeout_flags);
5677	if (flags & ~IORING_TIMEOUT_ABS)
5678		return -EINVAL;
5679
5680	req->timeout.off = off;
5681	if (unlikely(off && !req->ctx->off_timeout_used))
5682		req->ctx->off_timeout_used = true;
5683
5684	if (!req->async_data && io_alloc_async_data(req))
5685		return -ENOMEM;
5686
5687	data = req->async_data;
5688	data->req = req;
5689
5690	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5691		return -EFAULT;
5692
5693	data->mode = io_translate_timeout_mode(flags);
5694	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
5695
5696	if (is_timeout_link) {
5697		struct io_submit_link *link = &req->ctx->submit_state.link;
5698
5699		if (!link->head)
5700			return -EINVAL;
5701		if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
5702			return -EINVAL;
5703	}
5704	return 0;
5705}
5706
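/*
 * Editor's note: a minimal userspace sketch (not part of this file) of the
 * two timeout flavours prepared above. It assumes an initialised liburing
 * ring and the liburing helper named below.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// pure timeout: completes with -ETIME after one second
 *	io_uring_prep_timeout(sqe, &ts, 0, 0);
 *
 *	// counted timeout: count -> sqe->off; completes with res 0 as soon as
 *	// eight other completions have been posted, or with -ETIME otherwise
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_timeout(sqe, &ts, 8, 0);
 *	io_uring_submit(&ring);
 */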
5707static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
5708{
5709	struct io_ring_ctx *ctx = req->ctx;
5710	struct io_timeout_data *data = req->async_data;
5711	struct list_head *entry;
5712	u32 tail, off = req->timeout.off;
5713
5714	spin_lock_irq(&ctx->timeout_lock);
5715
5716	/*
5717	 * sqe->off holds how many events need to occur for this
5718	 * timeout event to be satisfied. If it isn't set, then this is
5719	 * a pure timeout request, sequence isn't used.
5720	 */
5721	if (io_is_timeout_noseq(req)) {
5722		entry = ctx->timeout_list.prev;
5723		goto add;
5724	}
5725
5726	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5727	req->timeout.