io_uring.c revision 6294f368
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
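
/*
 * For illustration, a minimal userspace-side submission sketch of the
 * ordering rules above, written with C11-style atomics instead of the
 * kernel barrier primitives (sq_tail, sq_flags, sq_mask, sq_array and
 * sqe_index are placeholder names; see liburing for the real code):
 *
 *	unsigned tail = *sq_tail;
 *	sq_array[tail & *sq_mask] = sqe_index;	// SQE itself already written
 *	// release store: orders the SQE/array stores before the tail store,
 *	// pairing with the kernel's acquire load of the tail
 *	__atomic_store_n(sq_tail, tail + 1, __ATOMIC_RELEASE);
 *	// SQPOLL only: full barrier between the tail update and flag check
 *	__atomic_thread_fence(__ATOMIC_SEQ_CST);
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 */
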
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/tracehook.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

/* 512 entries per page on 64-bit archs, 64 pages max */
#define IORING_MAX_FIXED_FILES	(1U << 15)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define IO_RSRC_TAG_TABLE_SHIFT	9
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

#define IORING_MAX_REG_BUFFERS	(1U << 14)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK|	\
				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
				IOSQE_BUFFER_SELECT)
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
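
/*
 * Illustrative (application-side) sketch of how the rings above are
 * reached, given a struct io_uring_params "p" filled in by
 * io_uring_setup(); field names follow the uapi, the rest is a hedged
 * example rather than code the kernel itself uses:
 *
 *	void *sq_ptr = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *			    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			    ring_fd, IORING_OFF_SQ_RING);
 *	unsigned *sq_head  = sq_ptr + p.sq_off.head;
 *	unsigned *sq_tail  = sq_ptr + p.sq_off.tail;
 *	unsigned *sq_mask  = sq_ptr + p.sq_off.ring_mask;
 *	u32      *sq_array = sq_ptr + p.sq_off.array;
 */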

enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[];
};

struct io_ring_ctx;

struct io_overflow_cqe {
	struct io_uring_cqe cqe;
	struct list_head list;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_rsrc_put {
	struct list_head list;
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

struct io_file_table {
	struct io_fixed_file *files;
};

struct io_rsrc_node {
	struct percpu_ref		refs;
	struct list_head		node;
	struct list_head		rsrc_list;
	struct io_rsrc_data		*rsrc_data;
	struct llist_node		llist;
	bool				done;
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx		*ctx;

	u64				**tags;
	unsigned int			nr;
	rsrc_put_fn			*do_put;
	atomic_t			refs;
	struct completion		done;
	bool				quiesce;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t		refs;
	atomic_t		park_pending;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
	int			sq_cpu;
	pid_t			task_pid;
	pid_t			task_tgid;

	unsigned long		state;
	struct completion	exited;
};

#define IO_COMPL_BATCH			32
#define IO_REQ_CACHE_SIZE		32
#define IO_REQ_ALLOC_BATCH		8

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	struct blk_plug		plug;
	struct io_submit_link	link;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_REQ_CACHE_SIZE];
	unsigned int		free_reqs;

	bool			plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_kiocb		*compl_reqs[IO_COMPL_BATCH];
	unsigned int		compl_nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head	free_list;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		file_refs;
	unsigned int		ios_left;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref	refs;

		struct io_rings		*rings;
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		struct list_head	defer_list;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;
		struct xarray		io_buffers;
		struct xarray		personalities;
		u32			pers_next;
		unsigned		sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct list_head	locked_free_list;
	unsigned int		locked_free_nr;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned long		check_cq_overflow;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct eventfd_ctx	*cq_ev_fd;
		struct wait_queue_head	poll_wait;
		struct wait_queue_head	cq_wait;
		unsigned		cq_extra;
		atomic_t		cq_timeouts;
		struct fasync_struct	*cq_fasync;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_queue;
	} ____cacheline_aligned_in_smp;

	struct io_restriction		restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct {
		struct io_rsrc_node		*rsrc_backup_node;
		struct io_mapped_ubuf		*dummy_ubuf;
		struct io_rsrc_data		*file_data;
		struct io_rsrc_data		*buf_data;

		struct delayed_work		rsrc_put_work;
		struct llist_head		rsrc_put_llist;
		struct list_head		rsrc_ref_list;
		spinlock_t			rsrc_ref_lock;
	};

	/* Keep this last, we don't need it for the fast path */
	struct {
		#if defined(CONFIG_UNIX)
			struct socket		*ring_sock;
		#endif
		/* hashed buffered write serialization */
		struct io_wq_hash		*hash_map;

		/* Only used for accounting purposes */
		struct user_struct		*user;
		struct mm_struct		*mm_account;

		/* ctx exit and cancelation */
		struct llist_head		fallback_llist;
		struct delayed_work		fallback_work;
		struct work_struct		exit_work;
		struct list_head		tctx_list;
		struct completion		ref_comp;
	};
};
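
/*
 * The sq_array indirection documented in the submission block of
 * struct io_ring_ctx above looks like this from the application side
 * (a hedged sketch; simple applications just use slot i for entry i):
 *
 *	struct io_uring_sqe *sqe = &sqes[idx];	// idx picked by the app
 *	fill_sqe(sqe);				// opcode, fd, addr, ...
 *	sq_array[*sq_tail & *sq_mask] = idx;	// publish the index, then
 *						// advance the tail as in the
 *						// comment at the top of file
 */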

struct io_uring_task {
	/* submission side */
	int			cached_refs;
	struct xarray		xa;
	struct wait_queue_head	wait;
	const struct io_ring_ctx *last;
	struct io_wq		*io_wq;
	struct percpu_counter	inflight;
	atomic_t		inflight_tracked;
	atomic_t		in_idle;

	spinlock_t		task_lock;
	struct io_wq_work_list	task_list;
	struct callback_head	task_work;
	bool			task_running;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_close {
	struct file			*file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_completion {
	struct file			*file;
	u32				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_LTIMEOUT_ACTIVE_BIT,
	REQ_F_COMPLETE_INLINE_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_DONT_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_NOWAIT_READ_BIT,
	REQ_F_NOWAIT_WRITE_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* don't attempt request reissue, see io_rw_reissue() */
	REQ_F_DONT_REISSUE	= BIT(REQ_F_DONT_REISSUE_BIT),
	/* supports async reads */
	REQ_F_NOWAIT_READ	= BIT(REQ_F_NOWAIT_READ_BIT),
	/* supports async writes */
	REQ_F_NOWAIT_WRITE	= BIT(REQ_F_NOWAIT_WRITE_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req);

struct io_task_work {
	union {
		struct io_wq_work_node	node;
		struct llist_node	fallback_node;
	};
	io_req_tw_func_t		func;
};

enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_update	poll_update;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_rsrc_update	rsrc_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	atomic_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct io_kiocb			*link;
	struct percpu_ref		*fixed_rsrc_refs;

	/* used with ctx->iopoll_list with reads/writes */
	struct list_head		inflight_entry;
	struct io_task_work		io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
	const struct cred		*creds;

	/* store used ubuf, so we can prevent reloading */
	struct io_mapped_ubuf		*imu;
};

struct io_tctx_node {
	struct list_head	ctx_node;
	struct task_struct	*task;
	struct io_ring_ctx	*ctx;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* do prep async if it is going to be punted */
	unsigned		needs_async_setup : 1;
	/* should block plug */
	unsigned		plug : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_connect),
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {},
	[IORING_OP_CLOSE] = {},
	[IORING_OP_FILES_UPDATE] = {},
	[IORING_OP_STATX] = {},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {},
	[IORING_OP_UNLINKAT] = {},
};

static bool io_disarm_next(struct io_kiocb *req);
static void io_uring_del_tctx_node(unsigned long index);
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);
static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);

static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
				 long res, unsigned int cflags);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_dismantle_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args);
static void io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_ring_ctx *ctx,
				struct io_submit_state *state,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);

static void io_req_task_queue(struct io_kiocb *req);
static void io_submit_flush_completions(struct io_ring_ctx *ctx);
static int io_req_prep_async(struct io_kiocb *req);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline void io_req_set_rsrc_node(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->fixed_rsrc_refs) {
		req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
		percpu_ref_get(req->fixed_rsrc_refs);
	}
}

static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
{
	bool got = percpu_ref_tryget(ref);

	/* already at zero, wait for ->release() */
	if (!got)
		wait_for_completion(compl);
	percpu_ref_resurrect(ref);
	if (got)
		percpu_ref_put(ref);
}

static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
{
	struct io_kiocb *req;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
}

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	return !req->timeout.off;
}

static void io_fallback_req_func(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
						fallback_work.work);
	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
	struct io_kiocb *req, *tmp;

	percpu_ref_get(&ctx->refs);
	llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
		req->io_task_work.func(req);
	percpu_ref_put(&ctx->refs);
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	/*
	 * Use 5 bits less than the max cq entries; that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
					GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);

	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
	if (!ctx->dummy_ubuf)
		goto err;
	/* set invalid range, so io_import_fixed() fails when it meets it */
	ctx->dummy_ubuf->ubuf = -1UL;

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_sq_wait);
	INIT_LIST_HEAD(&ctx->sqd_list);
	init_waitqueue_head(&ctx->poll_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->ref_comp);
	xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->cq_wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	spin_lock_init(&ctx->rsrc_ref_lock);
	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
	init_llist_head(&ctx->rsrc_put_llist);
	INIT_LIST_HEAD(&ctx->tctx_list);
	INIT_LIST_HEAD(&ctx->submit_state.free_list);
	INIT_LIST_HEAD(&ctx->locked_free_list);
	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
	return ctx;
err:
	kfree(ctx->dummy_ubuf);
	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}

static void io_account_cq_overflow(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
	ctx->cq_extra--;
}

static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
	}

	return false;
}

#define FFS_ASYNC_READ		0x1UL
#define FFS_ASYNC_WRITE		0x2UL
#ifdef CONFIG_64BIT
#define FFS_ISREG		0x4UL
#else
#define FFS_ISREG		0x0UL
#endif
#define FFS_MASK		~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
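
/*
 * These flag bits live in the low bits of io_fixed_file.file_ptr, which
 * works because struct file allocations are at least 8-byte aligned.
 * A hedged sketch of the pack/unpack (the real helpers appear further
 * down in this file):
 *
 *	file_slot->file_ptr = (unsigned long)file | FFS_ASYNC_READ;
 *	struct file *f = (struct file *)(file_slot->file_ptr & FFS_MASK);
 *	bool supports_nowait_read = file_slot->file_ptr & FFS_ASYNC_READ;
 */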

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE);
}

static void io_req_track_inflight(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_INFLIGHT)) {
		req->flags |= REQ_F_INFLIGHT;
		atomic_inc(&current->io_uring->inflight_tracked);
	}
}

static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_CREDS)) {
		req->flags |= REQ_F_CREDS;
		req->creds = get_current_cred();
	}

	req->work.list.next = NULL;
	req->work.flags = 0;
	if (req->flags & REQ_F_FORCE_ASYNC)
		req->work.flags |= IO_WQ_WORK_CONCURRENT;

	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	switch (req->opcode) {
	case IORING_OP_SPLICE:
	case IORING_OP_TEE:
		if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
			req->work.flags |= IO_WQ_WORK_UNBOUND;
		break;
	}
}

static void io_prep_async_link(struct io_kiocb *req)
{
	struct io_kiocb *cur;

	if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->completion_lock);
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
		spin_unlock_irq(&ctx->completion_lock);
	} else {
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
	}
}

static void io_queue_async_work(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link = io_prep_linked_timeout(req);
	struct io_uring_task *tctx = req->task->io_uring;

	BUG_ON(!tctx);
	BUG_ON(!tctx->io_wq);

	/* init ->work of the whole link before punting */
	io_prep_async_link(req);

	/*
	 * Not expected to happen, but if we do have a bug where this _can_
	 * happen, catch it here and ensure the request is marked as
	 * canceled. That will make io-wq go through the usual work cancel
	 * procedure rather than attempt to run this request (or create a new
	 * worker for it).
	 */
	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
		req->work.flags |= IO_WQ_WORK_CANCEL;

	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
					&req->work, req->flags);
	io_wq_enqueue(tctx->io_wq, &req->work);
	if (link)
		io_queue_linked_timeout(link);
}

static void io_kill_timeout(struct io_kiocb *req, int status)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_timeout_data *io = req->async_data;

	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
		list_del_init(&req->timeout.list);
		io_cqring_fill_event(req->ctx, req->user_data, status, 0);
		io_put_req_deferred(req, 1);
	}
}

static void io_queue_deferred(struct io_ring_ctx *ctx)
{
	while (!list_empty(&ctx->defer_list)) {
		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);

		if (req_need_defer(de->req, de->seq))
			break;
		list_del_init(&de->list);
		io_req_task_queue(de->req);
		kfree(de);
	}
}

static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	while (!list_empty(&ctx->timeout_list)) {
		u32 events_needed, events_got;
		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
						struct io_kiocb, timeout.list);

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
		events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		list_del_init(&req->timeout.list);
		io_kill_timeout(req, 0);
	}
	ctx->cq_last_tm_flush = seq;
}
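
/*
 * Worked example of the wraparound-safe comparison above: with
 * cq_last_tm_flush == 0xfffffff0, a timeout armed for 0x20 events later
 * (target_seq == 0x10 after wrapping) and a current seq of 0xfffffff8,
 * a direct "seq >= target_seq" check would wrongly fire, while
 * events_got == 0x08 < events_needed == 0x20 correctly keeps waiting.
 */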

static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (ctx->off_timeout_used)
		io_flush_timeouts(ctx);
	if (ctx->drain_active)
		io_queue_deferred(ctx);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active))
		__io_commit_cqring_flush(ctx);
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
{
	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail, mask = ctx->cq_entries - 1;

	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (__io_cqring_events(ctx) == ctx->cq_entries)
		return NULL;

	tail = ctx->cached_cq_tail++;
	return &rings->cqes[tail & mask];
}
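
/*
 * The matching application-side reap loop (a hedged sketch using
 * C11-style atomics; cq_head, cq_tail, cq_mask and cqes are placeholder
 * names): the acquire load pairs with the smp_store_release() in
 * io_commit_cqring(), and the release store of the head pairs with the
 * control dependency in io_get_cqe() above:
 *
 *	unsigned head = *cq_head;
 *	while (head != __atomic_load_n(cq_tail, __ATOMIC_ACQUIRE)) {
 *		struct io_uring_cqe *cqe = &cqes[head++ & *cq_mask];
 *		// consume cqe->user_data, cqe->res, cqe->flags
 *	}
 *	__atomic_store_n(cq_head, head, __ATOMIC_RELEASE);
 */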

static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
	if (likely(!ctx->cq_ev_fd))
		return false;
	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		return false;
	return !ctx->eventfd_async || io_wq_current_is_worker();
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
	if (waitqueue_active(&ctx->poll_wait)) {
		wake_up_interruptible(&ctx->poll_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (wq_has_sleeper(&ctx->cq_wait))
			wake_up_all(&ctx->cq_wait);
	}
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
	if (waitqueue_active(&ctx->poll_wait)) {
		wake_up_interruptible(&ctx->poll_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

/* Returns true if there are no backlogged entries after the flush */
static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
	unsigned long flags;
	bool all_flushed, posted;

	if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
		return false;

	posted = false;
	spin_lock_irqsave(&ctx->completion_lock, flags);
	while (!list_empty(&ctx->cq_overflow_list)) {
		struct io_uring_cqe *cqe = io_get_cqe(ctx);
		struct io_overflow_cqe *ocqe;

		if (!cqe && !force)
			break;
		ocqe = list_first_entry(&ctx->cq_overflow_list,
					struct io_overflow_cqe, list);
		if (cqe)
			memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
		else
			io_account_cq_overflow(ctx);

		posted = true;
		list_del(&ocqe->list);
		kfree(ocqe);
	}

	all_flushed = list_empty(&ctx->cq_overflow_list);
	if (all_flushed) {
		clear_bit(0, &ctx->check_cq_overflow);
		WRITE_ONCE(ctx->rings->sq_flags,
			   ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
	}

	if (posted)
		io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	if (posted)
		io_cqring_ev_posted(ctx);
	return all_flushed;
}

static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
{
	bool ret = true;

	if (test_bit(0, &ctx->check_cq_overflow)) {
		/* iopoll syncs against uring_lock, not completion_lock */
		if (ctx->flags & IORING_SETUP_IOPOLL)
			mutex_lock(&ctx->uring_lock);
		ret = __io_cqring_overflow_flush(ctx, false);
		if (ctx->flags & IORING_SETUP_IOPOLL)
			mutex_unlock(&ctx->uring_lock);
	}

	return ret;
}

/*
 * Shamelessly stolen from the mm implementation of page reference checking,
 * see commit f958d7b528b1 for details.
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
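
/*
 * The unsigned addition wraps, so both refs == 0 (0 + 127 == 127) and a
 * count just below overflow, e.g. UINT_MAX - 10 (wraps to 116), satisfy
 * "+ 127u <= 127u", while any healthy count such as 1 yields 128 and
 * does not.
 */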

static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
{
	return atomic_inc_not_zero(&req->refs);
}

static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_sub_and_test(refs, &req->refs);
}

static inline bool req_ref_put_and_test(struct io_kiocb *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->refs);
}

static inline void req_ref_put(struct io_kiocb *req)
{
	WARN_ON_ONCE(req_ref_put_and_test(req));
}

static inline void req_ref_get(struct io_kiocb *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	atomic_inc(&req->refs);
}

/* must be called somewhat shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	struct io_uring_task *tctx = task->io_uring;

	percpu_counter_sub(&tctx->inflight, nr);
	if (unlikely(atomic_read(&tctx->in_idle)))
		wake_up(&tctx->wait);
	put_task_struct_many(task, nr);
}

static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
				     long res, unsigned int cflags)
{
	struct io_overflow_cqe *ocqe;

	ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
	if (!ocqe) {
		/*
		 * If we're in ring overflow flush mode, or in task cancel mode,
		 * or cannot allocate an overflow entry, then we need to drop it
		 * on the floor.
		 */
		io_account_cq_overflow(ctx);
		return false;
	}
	if (list_empty(&ctx->cq_overflow_list)) {
		set_bit(0, &ctx->check_cq_overflow);
		WRITE_ONCE(ctx->rings->sq_flags,
			   ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);

	}
	ocqe->cqe.user_data = user_data;
	ocqe->cqe.res = res;
	ocqe->cqe.flags = cflags;
	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
	return true;
}

static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
					  long res, unsigned int cflags)
{
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, user_data, res, cflags);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (likely(cqe)) {
		WRITE_ONCE(cqe->user_data, user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
		return true;
	}
	return io_cqring_event_overflow(ctx, user_data, res, cflags);
}

/* not hot enough to be worth the bloat of inlining */
1638static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1639					  long res, unsigned int cflags)
1640{
1641	return __io_cqring_fill_event(ctx, user_data, res, cflags);
1642}
1643
1644static void io_req_complete_post(struct io_kiocb *req, long res,
1645				 unsigned int cflags)
1646{
1647	struct io_ring_ctx *ctx = req->ctx;
1648	unsigned long flags;
1649
1650	spin_lock_irqsave(&ctx->completion_lock, flags);
1651	__io_cqring_fill_event(ctx, req->user_data, res, cflags);
1652	/*
1653	 * If we're the last reference to this request, add to our locked
1654	 * free_list cache.
1655	 */
1656	if (req_ref_put_and_test(req)) {
1657		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
1658			if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL))
1659				io_disarm_next(req);
1660			if (req->link) {
1661				io_req_task_queue(req->link);
1662				req->link = NULL;
1663			}
1664		}
1665		io_dismantle_req(req);
1666		io_put_task(req->task, 1);
1667		list_add(&req->inflight_entry, &ctx->locked_free_list);
1668		ctx->locked_free_nr++;
1669	} else {
1670		if (!percpu_ref_tryget(&ctx->refs))
1671			req = NULL;
1672	}
1673	io_commit_cqring(ctx);
1674	spin_unlock_irqrestore(&ctx->completion_lock, flags);
1675
1676	if (req) {
1677		io_cqring_ev_posted(ctx);
1678		percpu_ref_put(&ctx->refs);
1679	}
1680}
1681
1682static inline bool io_req_needs_clean(struct io_kiocb *req)
1683{
1684	return req->flags & IO_REQ_CLEAN_FLAGS;
1685}
1686
1687static void io_req_complete_state(struct io_kiocb *req, long res,
1688				  unsigned int cflags)
1689{
1690	if (io_req_needs_clean(req))
1691		io_clean_op(req);
1692	req->result = res;
1693	req->compl.cflags = cflags;
1694	req->flags |= REQ_F_COMPLETE_INLINE;
1695}
1696
1697static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1698				     long res, unsigned cflags)
1699{
1700	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1701		io_req_complete_state(req, res, cflags);
1702	else
1703		io_req_complete_post(req, res, cflags);
1704}
1705
1706static inline void io_req_complete(struct io_kiocb *req, long res)
1707{
1708	__io_req_complete(req, 0, res, 0);
1709}
1710
1711static void io_req_complete_failed(struct io_kiocb *req, long res)
1712{
1713	req_set_fail(req);
1714	io_put_req(req);
1715	io_req_complete_post(req, res, 0);
1716}
1717
1718/*
1719 * Don't initialise the fields below on every allocation, but do that in
1720 * advance and keep them valid across allocations.
1721 */
1722static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1723{
1724	req->ctx = ctx;
1725	req->link = NULL;
1726	req->async_data = NULL;
1727	/* not necessary, but safer to zero */
1728	req->result = 0;
1729}
1730
1731static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
1732					struct io_submit_state *state)
1733{
1734	spin_lock_irq(&ctx->completion_lock);
1735	list_splice_init(&ctx->locked_free_list, &state->free_list);
1736	ctx->locked_free_nr = 0;
1737	spin_unlock_irq(&ctx->completion_lock);
1738}
1739
1740/* Returns true IFF there are requests in the cache */
1741static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
1742{
1743	struct io_submit_state *state = &ctx->submit_state;
1744	int nr;
1745
1746	/*
1747	 * If we have more than a batch's worth of requests in our IRQ side
1748	 * locked cache, grab the lock and move them over to our submission
1749	 * side cache.
1750	 */
1751	if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
1752		io_flush_cached_locked_reqs(ctx, state);
1753
1754	nr = state->free_reqs;
1755	while (!list_empty(&state->free_list)) {
1756		struct io_kiocb *req = list_first_entry(&state->free_list,
1757					struct io_kiocb, inflight_entry);
1758
1759		list_del(&req->inflight_entry);
1760		state->reqs[nr++] = req;
1761		if (nr == ARRAY_SIZE(state->reqs))
1762			break;
1763	}
1764
1765	state->free_reqs = nr;
1766	return nr != 0;
1767}
1768
1769static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
1770{
1771	struct io_submit_state *state = &ctx->submit_state;
1772	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1773	int ret, i;
1774
1775	BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
1776
1777	if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
1778		goto got_req;
1779
1780	ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1781				    state->reqs);
1782
1783	/*
1784	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1785	 * retry single alloc to be on the safe side.
1786	 */
1787	if (unlikely(ret <= 0)) {
1788		state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1789		if (!state->reqs[0])
1790			return NULL;
1791		ret = 1;
1792	}
1793
1794	for (i = 0; i < ret; i++)
1795		io_preinit_req(state->reqs[i], ctx);
1796	state->free_reqs = ret;
1797got_req:
1798	state->free_reqs--;
1799	return state->reqs[state->free_reqs];
1800}
1801
1802static inline void io_put_file(struct file *file)
1803{
1804	if (file)
1805		fput(file);
1806}
1807
1808static void io_dismantle_req(struct io_kiocb *req)
1809{
1810	unsigned int flags = req->flags;
1811
1812	if (io_req_needs_clean(req))
1813		io_clean_op(req);
1814	if (!(flags & REQ_F_FIXED_FILE))
1815		io_put_file(req->file);
1816	if (req->fixed_rsrc_refs)
1817		percpu_ref_put(req->fixed_rsrc_refs);
1818	if (req->async_data) {
1819		kfree(req->async_data);
1820		req->async_data = NULL;
1821	}
1822}
1823
1824static void __io_free_req(struct io_kiocb *req)
1825{
1826	struct io_ring_ctx *ctx = req->ctx;
1827	unsigned long flags;
1828
1829	io_dismantle_req(req);
1830	io_put_task(req->task, 1);
1831
1832	spin_lock_irqsave(&ctx->completion_lock, flags);
1833	list_add(&req->inflight_entry, &ctx->locked_free_list);
1834	ctx->locked_free_nr++;
1835	spin_unlock_irqrestore(&ctx->completion_lock, flags);
1836
1837	percpu_ref_put(&ctx->refs);
1838}
1839
1840static inline void io_remove_next_linked(struct io_kiocb *req)
1841{
1842	struct io_kiocb *nxt = req->link;
1843
1844	req->link = nxt->link;
1845	nxt->link = NULL;
1846}
1847
1848static bool io_kill_linked_timeout(struct io_kiocb *req)
1849	__must_hold(&req->ctx->completion_lock)
1850{
1851	struct io_kiocb *link = req->link;
1852
1853	/*
1854	 * Can happen if a linked timeout fired and link had been like
1855	 * req -> link t-out -> link t-out [-> ...]
1856	 */
1857	if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
1858		struct io_timeout_data *io = link->async_data;
1859
1860		io_remove_next_linked(req);
1861		link->timeout.head = NULL;
1862		if (hrtimer_try_to_cancel(&io->timer) != -1) {
1863			io_cqring_fill_event(link->ctx, link->user_data,
1864					     -ECANCELED, 0);
1865			io_put_req_deferred(link, 1);
1866			return true;
1867		}
1868	}
1869	return false;
1870}
1871
1872static void io_fail_links(struct io_kiocb *req)
1873	__must_hold(&req->ctx->completion_lock)
1874{
1875	struct io_kiocb *nxt, *link = req->link;
1876
1877	req->link = NULL;
1878	while (link) {
1879		nxt = link->link;
1880		link->link = NULL;
1881
1882		trace_io_uring_fail_link(req, link);
1883		io_cqring_fill_event(link->ctx, link->user_data, -ECANCELED, 0);
1884		io_put_req_deferred(link, 2);
1885		link = nxt;
1886	}
1887}
1888
1889static bool io_disarm_next(struct io_kiocb *req)
1890	__must_hold(&req->ctx->completion_lock)
1891{
1892	bool posted = false;
1893
1894	if (likely(req->flags & REQ_F_LINK_TIMEOUT))
1895		posted = io_kill_linked_timeout(req);
1896	if (unlikely((req->flags & REQ_F_FAIL) &&
1897		     !(req->flags & REQ_F_HARDLINK))) {
1898		posted |= (req->link != NULL);
1899		io_fail_links(req);
1900	}
1901	return posted;
1902}
1903
1904static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
1905{
1906	struct io_kiocb *nxt;
1907
1908	/*
1909	 * If LINK is set, we have dependent requests in this chain. If we
1910	 * didn't fail this request, queue the first one up, moving any other
1911	 * dependencies to the next request. In case of failure, fail the rest
1912	 * of the chain.
1913	 */
1914	if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL)) {
1915		struct io_ring_ctx *ctx = req->ctx;
1916		unsigned long flags;
1917		bool posted;
1918
1919		spin_lock_irqsave(&ctx->completion_lock, flags);
1920		posted = io_disarm_next(req);
1921		if (posted)
1922			io_commit_cqring(req->ctx);
1923		spin_unlock_irqrestore(&ctx->completion_lock, flags);
1924		if (posted)
1925			io_cqring_ev_posted(ctx);
1926	}
1927	nxt = req->link;
1928	req->link = NULL;
1929	return nxt;
1930}
1931
1932static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
1933{
1934	if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
1935		return NULL;
1936	return __io_req_find_next(req);
1937}
1938
1939static void ctx_flush_and_put(struct io_ring_ctx *ctx)
1940{
1941	if (!ctx)
1942		return;
1943	if (ctx->submit_state.compl_nr) {
1944		mutex_lock(&ctx->uring_lock);
1945		io_submit_flush_completions(ctx);
1946		mutex_unlock(&ctx->uring_lock);
1947	}
1948	percpu_ref_put(&ctx->refs);
1949}
1950
1951static void tctx_task_work(struct callback_head *cb)
1952{
1953	struct io_ring_ctx *ctx = NULL;
1954	struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
1955						  task_work);
1956
1957	while (1) {
1958		struct io_wq_work_node *node;
1959
1960		spin_lock_irq(&tctx->task_lock);
1961		node = tctx->task_list.first;
1962		INIT_WQ_LIST(&tctx->task_list);
1963		if (!node)
1964			tctx->task_running = false;
1965		spin_unlock_irq(&tctx->task_lock);
1966		if (!node)
1967			break;
1968
1969		do {
1970			struct io_wq_work_node *next = node->next;
1971			struct io_kiocb *req = container_of(node, struct io_kiocb,
1972							    io_task_work.node);
1973
1974			if (req->ctx != ctx) {
1975				ctx_flush_and_put(ctx);
1976				ctx = req->ctx;
1977				percpu_ref_get(&ctx->refs);
1978			}
1979			req->io_task_work.func(req);
1980			node = next;
1981		} while (node);
1982
1983		cond_resched();
1984	}
1985
1986	ctx_flush_and_put(ctx);
1987}
1988
1989static void io_req_task_work_add(struct io_kiocb *req)
1990{
1991	struct task_struct *tsk = req->task;
1992	struct io_uring_task *tctx = tsk->io_uring;
1993	enum task_work_notify_mode notify;
1994	struct io_wq_work_node *node;
1995	unsigned long flags;
1996	bool running;
1997
1998	WARN_ON_ONCE(!tctx);
1999
2000	spin_lock_irqsave(&tctx->task_lock, flags);
2001	wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
2002	running = tctx->task_running;
2003	if (!running)
2004		tctx->task_running = true;
2005	spin_unlock_irqrestore(&tctx->task_lock, flags);
2006
2007	/* task_work already pending, we're done */
2008	if (running)
2009		return;
2010
2011	/*
2012	 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2013	 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2014	 * processing task_work. There's no reliable way to tell if TWA_RESUME
2015	 * will do the job.
2016	 */
2017	notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
2018	if (!task_work_add(tsk, &tctx->task_work, notify)) {
2019		wake_up_process(tsk);
2020		return;
2021	}
2022
2023	spin_lock_irqsave(&tctx->task_lock, flags);
2024	tctx->task_running = false;
2025	node = tctx->task_list.first;
2026	INIT_WQ_LIST(&tctx->task_list);
2027	spin_unlock_irqrestore(&tctx->task_lock, flags);
2028
2029	while (node) {
2030		req = container_of(node, struct io_kiocb, io_task_work.node);
2031		node = node->next;
2032		if (llist_add(&req->io_task_work.fallback_node,
2033			      &req->ctx->fallback_llist))
2034			schedule_delayed_work(&req->ctx->fallback_work, 1);
2035	}
2036}
2037
2038static void io_req_task_cancel(struct io_kiocb *req)
2039{
2040	struct io_ring_ctx *ctx = req->ctx;
2041
2042	/* ctx is guaranteed to stay alive while we hold uring_lock */
2043	mutex_lock(&ctx->uring_lock);
2044	io_req_complete_failed(req, req->result);
2045	mutex_unlock(&ctx->uring_lock);
2046}
2047
2048static void io_req_task_submit(struct io_kiocb *req)
2049{
2050	struct io_ring_ctx *ctx = req->ctx;
2051
2052	/* ctx stays valid until unlock, even if we drop all our ctx->refs */
2053	mutex_lock(&ctx->uring_lock);
2054	if (likely(!(req->task->flags & PF_EXITING)))
2055		__io_queue_sqe(req);
2056	else
2057		io_req_complete_failed(req, -EFAULT);
2058	mutex_unlock(&ctx->uring_lock);
2059}
2060
2061static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2062{
2063	req->result = ret;
2064	req->io_task_work.func = io_req_task_cancel;
2065	io_req_task_work_add(req);
2066}
2067
2068static void io_req_task_queue(struct io_kiocb *req)
2069{
2070	req->io_task_work.func = io_req_task_submit;
2071	io_req_task_work_add(req);
2072}
2073
2074static void io_req_task_queue_reissue(struct io_kiocb *req)
2075{
2076	req->io_task_work.func = io_queue_async_work;
2077	io_req_task_work_add(req);
2078}
2079
2080static inline void io_queue_next(struct io_kiocb *req)
2081{
2082	struct io_kiocb *nxt = io_req_find_next(req);
2083
2084	if (nxt)
2085		io_req_task_queue(nxt);
2086}
2087
2088static void io_free_req(struct io_kiocb *req)
2089{
2090	io_queue_next(req);
2091	__io_free_req(req);
2092}
2093
2094struct req_batch {
2095	struct task_struct	*task;
2096	int			task_refs;
2097	int			ctx_refs;
2098};
2099
2100static inline void io_init_req_batch(struct req_batch *rb)
2101{
2102	rb->task_refs = 0;
2103	rb->ctx_refs = 0;
2104	rb->task = NULL;
2105}
2106
2107static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2108				     struct req_batch *rb)
2109{
2110	if (rb->ctx_refs)
2111		percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
2112	if (rb->task == current)
2113		current->io_uring->cached_refs += rb->task_refs;
2114	else if (rb->task)
2115		io_put_task(rb->task, rb->task_refs);
2116}
2117
2118static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2119			      struct io_submit_state *state)
2120{
2121	io_queue_next(req);
2122	io_dismantle_req(req);
2123
2124	if (req->task != rb->task) {
2125		if (rb->task)
2126			io_put_task(rb->task, rb->task_refs);
2127		rb->task = req->task;
2128		rb->task_refs = 0;
2129	}
2130	rb->task_refs++;
2131	rb->ctx_refs++;
2132
2133	if (state->free_reqs != ARRAY_SIZE(state->reqs))
2134		state->reqs[state->free_reqs++] = req;
2135	else
2136		list_add(&req->inflight_entry, &state->free_list);
2137}
2138
2139static void io_submit_flush_completions(struct io_ring_ctx *ctx)
2140	__must_hold(&ctx->uring_lock)
2141{
2142	struct io_submit_state *state = &ctx->submit_state;
2143	int i, nr = state->compl_nr;
2144	struct req_batch rb;
2145
2146	spin_lock_irq(&ctx->completion_lock);
2147	for (i = 0; i < nr; i++) {
2148		struct io_kiocb *req = state->compl_reqs[i];
2149
2150		__io_cqring_fill_event(ctx, req->user_data, req->result,
2151					req->compl.cflags);
2152	}
2153	io_commit_cqring(ctx);
2154	spin_unlock_irq(&ctx->completion_lock);
2155	io_cqring_ev_posted(ctx);
2156
2157	io_init_req_batch(&rb);
2158	for (i = 0; i < nr; i++) {
2159		struct io_kiocb *req = state->compl_reqs[i];
2160
2161		/* submission and completion refs */
2162		if (req_ref_sub_and_test(req, 2))
2163			io_req_free_batch(&rb, req, &ctx->submit_state);
2164	}
2165
2166	io_req_free_batch_finish(ctx, &rb);
2167	state->compl_nr = 0;
2168}
2169
2170/*
2171 * Drop a reference to the request, and if it was the last reference,
2172 * return the next request in the chain (if there is one).
2173 */
2174static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
2175{
2176	struct io_kiocb *nxt = NULL;
2177
2178	if (req_ref_put_and_test(req)) {
2179		nxt = io_req_find_next(req);
2180		__io_free_req(req);
2181	}
2182	return nxt;
2183}
2184
2185static inline void io_put_req(struct io_kiocb *req)
2186{
2187	if (req_ref_put_and_test(req))
2188		io_free_req(req);
2189}
2190
2191static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2192{
2193	if (req_ref_sub_and_test(req, refs)) {
2194		req->io_task_work.func = io_free_req;
2195		io_req_task_work_add(req);
2196	}
2197}
2198
2199static unsigned io_cqring_events(struct io_ring_ctx *ctx)
2200{
2201	/* See comment at the top of this file */
2202	smp_rmb();
2203	return __io_cqring_events(ctx);
2204}
2205
2206static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2207{
2208	struct io_rings *rings = ctx->rings;
2209
2210	/* make sure SQ entry isn't read before tail */
2211	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2212}
2213
2214static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
2215{
2216	unsigned int cflags;
2217
2218	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2219	cflags |= IORING_CQE_F_BUFFER;
2220	req->flags &= ~REQ_F_BUFFER_SELECTED;
2221	kfree(kbuf);
2222	return cflags;
2223}
2224
2225static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2226{
2227	struct io_buffer *kbuf;
2228
2229	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2230	return io_put_kbuf(req, kbuf);
2231}
2232
2233static inline bool io_run_task_work(void)
2234{
2235	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
2236		__set_current_state(TASK_RUNNING);
2237		tracehook_notify_signal();
2238		return true;
2239	}
2240
2241	return false;
2242}
2243
2244/*
2245 * Find and free completed poll iocbs
2246 */
2247static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2248			       struct list_head *done, bool resubmit)
2249{
2250	struct req_batch rb;
2251	struct io_kiocb *req;
2252
2253	/* order with ->result store in io_complete_rw_iopoll() */
2254	smp_rmb();
2255
2256	io_init_req_batch(&rb);
2257	while (!list_empty(done)) {
2258		int cflags = 0;
2259
2260		req = list_first_entry(done, struct io_kiocb, inflight_entry);
2261		list_del(&req->inflight_entry);
2262
2263		if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
2264		    !(req->flags & REQ_F_DONT_REISSUE)) {
2265			req->iopoll_completed = 0;
2266			req_ref_get(req);
2267			io_req_task_queue_reissue(req);
2268			continue;
2269		}
2270
2271		if (req->flags & REQ_F_BUFFER_SELECTED)
2272			cflags = io_put_rw_kbuf(req);
2273
2274		__io_cqring_fill_event(ctx, req->user_data, req->result, cflags);
2275		(*nr_events)++;
2276
2277		if (req_ref_put_and_test(req))
2278			io_req_free_batch(&rb, req, &ctx->submit_state);
2279	}
2280
2281	io_commit_cqring(ctx);
2282	io_cqring_ev_posted_iopoll(ctx);
2283	io_req_free_batch_finish(ctx, &rb);
2284}
2285
2286static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2287			long min, bool resubmit)
2288{
2289	struct io_kiocb *req, *tmp;
2290	LIST_HEAD(done);
2291	bool spin;
2292
2293	/*
2294	 * Only spin for completions if we don't have multiple devices hanging
2295	 * off our complete list, and we're under the requested amount.
2296	 */
2297	spin = !ctx->poll_multi_queue && *nr_events < min;
2298
2299	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
2300		struct kiocb *kiocb = &req->rw.kiocb;
2301		int ret;
2302
2303		/*
2304		 * Move completed and retryable entries to our local lists.
2305		 * If we find a request that requires polling, break out
2306		 * and complete those lists first, if we have entries there.
2307		 */
2308		if (READ_ONCE(req->iopoll_completed)) {
2309			list_move_tail(&req->inflight_entry, &done);
2310			continue;
2311		}
2312		if (!list_empty(&done))
2313			break;
2314
2315		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2316		if (unlikely(ret < 0))
2317			return ret;
2318		else if (ret)
2319			spin = false;
2320
2321		/* iopoll may have completed the current req */
2322		if (READ_ONCE(req->iopoll_completed))
2323			list_move_tail(&req->inflight_entry, &done);
2324	}
2325
2326	if (!list_empty(&done))
2327		io_iopoll_complete(ctx, nr_events, &done, resubmit);
2328
2329	return 0;
2330}
2331
2332/*
2333 * We can't just wait for polled events to come to us, we have to actively
2334 * find and complete them.
2335 */
2336static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
2337{
2338	if (!(ctx->flags & IORING_SETUP_IOPOLL))
2339		return;
2340
2341	mutex_lock(&ctx->uring_lock);
2342	while (!list_empty(&ctx->iopoll_list)) {
2343		unsigned int nr_events = 0;
2344
2345		io_do_iopoll(ctx, &nr_events, 0, false);
2346
2347		/* let it sleep and repeat later if we can't complete a request */
2348		if (nr_events == 0)
2349			break;
2350		/*
2351		 * Ensure we allow local-to-the-cpu processing to take place;
2352		 * in this case we need to ensure that we reap all events.
2353		 * Also let task_work, etc. progress by releasing the mutex.
2354		 */
2355		if (need_resched()) {
2356			mutex_unlock(&ctx->uring_lock);
2357			cond_resched();
2358			mutex_lock(&ctx->uring_lock);
2359		}
2360	}
2361	mutex_unlock(&ctx->uring_lock);
2362}
2363
2364static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
2365{
2366	unsigned int nr_events = 0;
2367	int ret = 0;
2368
2369	/*
2370	 * We disallow the app entering submit/complete with polling, but we
2371	 * still need to lock the ring to prevent racing with polled issue
2372	 * that got punted to a workqueue.
2373	 */
2374	mutex_lock(&ctx->uring_lock);
2375	/*
2376	 * Don't enter poll loop if we already have events pending.
2377	 * If we do, we can potentially be spinning for commands that
2378	 * already triggered a CQE (eg in error).
2379	 */
2380	if (test_bit(0, &ctx->check_cq_overflow))
2381		__io_cqring_overflow_flush(ctx, false);
2382	if (io_cqring_events(ctx))
2383		goto out;
2384	do {
2385		/*
2386		 * If a submit got punted to a workqueue, we can have the
2387		 * application entering polling for a command before it gets
2388		 * issued. That app will hold the uring_lock for the duration
2389		 * of the poll right here, so we need to take a breather every
2390		 * now and then to ensure that the issue has a chance to add
2391		 * the poll to the issued list. Otherwise we can spin here
2392		 * forever, while the workqueue is stuck trying to acquire the
2393		 * very same mutex.
2394		 */
2395		if (list_empty(&ctx->iopoll_list)) {
2396			u32 tail = ctx->cached_cq_tail;
2397
2398			mutex_unlock(&ctx->uring_lock);
2399			io_run_task_work();
2400			mutex_lock(&ctx->uring_lock);
2401
2402			/* some requests don't go through iopoll_list */
2403			if (tail != ctx->cached_cq_tail ||
2404			    list_empty(&ctx->iopoll_list))
2405				break;
2406		}
2407		ret = io_do_iopoll(ctx, &nr_events, min, true);
2408	} while (!ret && nr_events < min && !need_resched());
2409out:
2410	mutex_unlock(&ctx->uring_lock);
2411	return ret;
2412}
2413
2414static void kiocb_end_write(struct io_kiocb *req)
2415{
2416	/*
2417	 * Tell lockdep we inherited freeze protection from the submission
2418	 * thread.
2419	 */
2420	if (req->flags & REQ_F_ISREG) {
2421		struct super_block *sb = file_inode(req->file)->i_sb;
2422
2423		__sb_writers_acquired(sb, SB_FREEZE_WRITE);
2424		sb_end_write(sb);
2425	}
2426}
2427
2428#ifdef CONFIG_BLOCK
2429static bool io_resubmit_prep(struct io_kiocb *req)
2430{
2431	struct io_async_rw *rw = req->async_data;
2432
2433	if (!rw)
2434		return !io_req_prep_async(req);
2435	/* may have left rw->iter inconsistent on -EIOCBQUEUED */
2436	iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
2437	return true;
2438}
2439
2440static bool io_rw_should_reissue(struct io_kiocb *req)
2441{
2442	umode_t mode = file_inode(req->file)->i_mode;
2443	struct io_ring_ctx *ctx = req->ctx;
2444
2445	if (!S_ISBLK(mode) && !S_ISREG(mode))
2446		return false;
2447	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2448	    !(ctx->flags & IORING_SETUP_IOPOLL)))
2449		return false;
2450	/*
2451	 * If ref is dying, we might be running poll reap from the exit work.
2452	 * Don't attempt to reissue from that path, just let it fail with
2453	 * -EAGAIN.
2454	 */
2455	if (percpu_ref_is_dying(&ctx->refs))
2456		return false;
2457	/*
2458	 * Play it safe and assume it's not safe to re-import and reissue if we're
2459	 * not in the original thread group (or in task context).
2460	 */
2461	if (!same_thread_group(req->task, current) || !in_task())
2462		return false;
2463	return true;
2464}
2465#else
2466static bool io_resubmit_prep(struct io_kiocb *req)
2467{
2468	return false;
2469}
2470static bool io_rw_should_reissue(struct io_kiocb *req)
2471{
2472	return false;
2473}
2474#endif
2475
2476static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2477			     unsigned int issue_flags)
2478{
2479	int cflags = 0;
2480
2481	if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2482		kiocb_end_write(req);
2483	if (res != req->result) {
2484		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
2485		    io_rw_should_reissue(req)) {
2486			req->flags |= REQ_F_REISSUE;
2487			return;
2488		}
2489		req_set_fail(req);
2490	}
2491	if (req->flags & REQ_F_BUFFER_SELECTED)
2492		cflags = io_put_rw_kbuf(req);
2493	__io_req_complete(req, issue_flags, res, cflags);
2494}
2495
2496static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2497{
2498	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2499
2500	__io_complete_rw(req, res, res2, 0);
2501}
2502
2503static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2504{
2505	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2506
2507	if (kiocb->ki_flags & IOCB_WRITE)
2508		kiocb_end_write(req);
2509	if (unlikely(res != req->result)) {
2510		if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
2511		    io_resubmit_prep(req))) {
2512			req_set_fail(req);
2513			req->flags |= REQ_F_DONT_REISSUE;
2514		}
2515	}
2516
2517	WRITE_ONCE(req->result, res);
2518	/* order with io_iopoll_complete() checking ->result */
2519	smp_wmb();
2520	WRITE_ONCE(req->iopoll_completed, 1);
2521}
2522
2523/*
2524 * After the iocb has been issued, it's safe to be found on the poll list.
2525 * Adding the kiocb to the list AFTER submission ensures that we don't
2526 * find it from an io_do_iopoll() thread before the issuer is done
2527 * accessing the kiocb cookie.
2528 */
2529static void io_iopoll_req_issued(struct io_kiocb *req)
2530{
2531	struct io_ring_ctx *ctx = req->ctx;
2532	const bool in_async = io_wq_current_is_worker();
2533
2534	/* workqueue context doesn't hold uring_lock, grab it now */
2535	if (unlikely(in_async))
2536		mutex_lock(&ctx->uring_lock);
2537
2538	/*
2539	 * Track whether we have multiple files in our lists. This will impact
2540	 * how we do polling eventually: we don't spin if we're on potentially
2541	 * different devices.
2542	 */
2543	if (list_empty(&ctx->iopoll_list)) {
2544		ctx->poll_multi_queue = false;
2545	} else if (!ctx->poll_multi_queue) {
2546		struct io_kiocb *list_req;
2547		unsigned int queue_num0, queue_num1;
2548
2549		list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
2550						inflight_entry);
2551
2552		if (list_req->file != req->file) {
2553			ctx->poll_multi_queue = true;
2554		} else {
2555			queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
2556			queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
2557			if (queue_num0 != queue_num1)
2558				ctx->poll_multi_queue = true;
2559		}
2560	}
2561
2562	/*
2563	 * For fast devices, IO may have already completed. If it has, add
2564	 * it to the front so we find it first.
2565	 */
2566	if (READ_ONCE(req->iopoll_completed))
2567		list_add(&req->inflight_entry, &ctx->iopoll_list);
2568	else
2569		list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
2570
2571	if (unlikely(in_async)) {
2572		/*
2573		 * If IORING_SETUP_SQPOLL is enabled, sqes are handled either
2574		 * in sq thread task context or in io worker task context. If
2575		 * the current task context is the sq thread, we don't need to
2576		 * check whether we should wake up the sq thread.
2577		 */
2578		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2579		    wq_has_sleeper(&ctx->sq_data->wait))
2580			wake_up(&ctx->sq_data->wait);
2581
2582		mutex_unlock(&ctx->uring_lock);
2583	}
2584}
2585
2586static inline void io_state_file_put(struct io_submit_state *state)
2587{
2588	if (state->file_refs) {
2589		fput_many(state->file, state->file_refs);
2590		state->file_refs = 0;
2591	}
2592}
2593
2594/*
2595 * Get as many references to a file as we have IOs left in this submission,
2596 * assuming most submissions are for one file, or at least that each file
2597 * has more than one submission.
2598 */
2599static struct file *__io_file_get(struct io_submit_state *state, int fd)
2600{
2601	if (!state)
2602		return fget(fd);
2603
2604	if (state->file_refs) {
2605		if (state->fd == fd) {
2606			state->file_refs--;
2607			return state->file;
2608		}
2609		io_state_file_put(state);
2610	}
2611	state->file = fget_many(fd, state->ios_left);
2612	if (unlikely(!state->file))
2613		return NULL;
2614
2615	state->fd = fd;
2616	state->file_refs = state->ios_left - 1;
2617	return state->file;
2618}
2619
2620static bool io_bdev_nowait(struct block_device *bdev)
2621{
2622	return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
2623}
2624
2625/*
2626 * If we tracked the file through the SCM inflight mechanism, we could support
2627 * any file. For now, just ensure that anything potentially problematic is done
2628 * inline.
2629 */
2630static bool __io_file_supports_nowait(struct file *file, int rw)
2631{
2632	umode_t mode = file_inode(file)->i_mode;
2633
2634	if (S_ISBLK(mode)) {
2635		if (IS_ENABLED(CONFIG_BLOCK) &&
2636		    io_bdev_nowait(I_BDEV(file->f_mapping->host)))
2637			return true;
2638		return false;
2639	}
2640	if (S_ISSOCK(mode))
2641		return true;
2642	if (S_ISREG(mode)) {
2643		if (IS_ENABLED(CONFIG_BLOCK) &&
2644		    io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
2645		    file->f_op != &io_uring_fops)
2646			return true;
2647		return false;
2648	}
2649
2650	/* any ->read/write should understand O_NONBLOCK */
2651	if (file->f_flags & O_NONBLOCK)
2652		return true;
2653
2654	if (!(file->f_mode & FMODE_NOWAIT))
2655		return false;
2656
2657	if (rw == READ)
2658		return file->f_op->read_iter != NULL;
2659
2660	return file->f_op->write_iter != NULL;
2661}
2662
2663static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
2664{
2665	if (rw == READ && (req->flags & REQ_F_NOWAIT_READ))
2666		return true;
2667	else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE))
2668		return true;
2669
2670	return __io_file_supports_nowait(req->file, rw);
2671}
2672
2673static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2674{
2675	struct io_ring_ctx *ctx = req->ctx;
2676	struct kiocb *kiocb = &req->rw.kiocb;
2677	struct file *file = req->file;
2678	unsigned ioprio;
2679	int ret;
2680
2681	if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode))
2682		req->flags |= REQ_F_ISREG;
2683
2684	kiocb->ki_pos = READ_ONCE(sqe->off);
2685	if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
2686		req->flags |= REQ_F_CUR_POS;
2687		kiocb->ki_pos = file->f_pos;
2688	}
2689	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
2690	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2691	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2692	if (unlikely(ret))
2693		return ret;
2694
2695	/* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
2696	if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
2697		req->flags |= REQ_F_NOWAIT;
2698
2699	ioprio = READ_ONCE(sqe->ioprio);
2700	if (ioprio) {
2701		ret = ioprio_check_cap(ioprio);
2702		if (ret)
2703			return ret;
2704
2705		kiocb->ki_ioprio = ioprio;
2706	} else
2707		kiocb->ki_ioprio = get_current_ioprio();
2708
2709	if (ctx->flags & IORING_SETUP_IOPOLL) {
2710		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2711		    !kiocb->ki_filp->f_op->iopoll)
2712			return -EOPNOTSUPP;
2713
2714		kiocb->ki_flags |= IOCB_HIPRI;
2715		kiocb->ki_complete = io_complete_rw_iopoll;
2716		req->iopoll_completed = 0;
2717	} else {
2718		if (kiocb->ki_flags & IOCB_HIPRI)
2719			return -EINVAL;
2720		kiocb->ki_complete = io_complete_rw;
2721	}
2722
2723	if (req->opcode == IORING_OP_READ_FIXED ||
2724	    req->opcode == IORING_OP_WRITE_FIXED) {
2725		req->imu = NULL;
2726		io_req_set_rsrc_node(req);
2727	}
2728
2729	req->rw.addr = READ_ONCE(sqe->addr);
2730	req->rw.len = READ_ONCE(sqe->len);
2731	req->buf_index = READ_ONCE(sqe->buf_index);
2732	return 0;
2733}
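
/*
 * Note on the ki_pos setup above: an SQE offset of -1 on a non-stream file
 * means "use and update the file's current position", like read(2) versus
 * pread(2). A hedged liburing-style sketch, not part of this file:
 *
 *	io_uring_prep_read(sqe, fd, buf, len, -1);
 *
 * reads at the current f_pos; REQ_F_CUR_POS then makes kiocb_done() write
 * the updated position back to the file.
 */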
2734
2735static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2736{
2737	switch (ret) {
2738	case -EIOCBQUEUED:
2739		break;
2740	case -ERESTARTSYS:
2741	case -ERESTARTNOINTR:
2742	case -ERESTARTNOHAND:
2743	case -ERESTART_RESTARTBLOCK:
2744		/*
2745		 * We can't just restart the syscall, since previously
2746		 * submitted sqes may already be in progress. Just fail this
2747		 * IO with EINTR.
2748		 */
2749		ret = -EINTR;
2750		fallthrough;
2751	default:
2752		kiocb->ki_complete(kiocb, ret, 0);
2753	}
2754}
2755
2756static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
2757		       unsigned int issue_flags)
2758{
2759	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2760	struct io_async_rw *io = req->async_data;
2761	bool check_reissue = kiocb->ki_complete == io_complete_rw;
2762
2763	/* add previously done IO, if any */
2764	if (io && io->bytes_done > 0) {
2765		if (ret < 0)
2766			ret = io->bytes_done;
2767		else
2768			ret += io->bytes_done;
2769	}
2770
2771	if (req->flags & REQ_F_CUR_POS)
2772		req->file->f_pos = kiocb->ki_pos;
2773	if (ret >= 0 && check_reissue)
2774		__io_complete_rw(req, ret, 0, issue_flags);
2775	else
2776		io_rw_done(kiocb, ret);
2777
2778	if (check_reissue && (req->flags & REQ_F_REISSUE)) {
2779		req->flags &= ~REQ_F_REISSUE;
2780		if (io_resubmit_prep(req)) {
2781			req_ref_get(req);
2782			io_req_task_queue_reissue(req);
2783		} else {
2784			int cflags = 0;
2785
2786			req_set_fail(req);
2787			if (req->flags & REQ_F_BUFFER_SELECTED)
2788				cflags = io_put_rw_kbuf(req);
2789			__io_req_complete(req, issue_flags, ret, cflags);
2790		}
2791	}
2792}
2793
2794static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
2795			     struct io_mapped_ubuf *imu)
2796{
2797	size_t len = req->rw.len;
2798	u64 buf_end, buf_addr = req->rw.addr;
2799	size_t offset;
2800
2801	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
2802		return -EFAULT;
2803	/* not inside the mapped region */
2804	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
2805		return -EFAULT;
2806
2807	/*
2808	 * May not be the start of the buffer; set the size appropriately
2809	 * and advance to the beginning.
2810	 */
2811	offset = buf_addr - imu->ubuf;
2812	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
2813
2814	if (offset) {
2815		/*
2816		 * Don't use iov_iter_advance() here, as it's really slow when
2817		 * using the latter parts of a big fixed buffer - it iterates
2818		 * over each segment manually. We can cheat a bit here, because
2819		 * we know that:
2820		 *
2821		 * 1) it's a BVEC iter, we set it up
2822		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2823		 *    first and last bvec
2824		 *
2825		 * So just find our index, and adjust the iterator afterwards.
2826		 * If the offset is within the first bvec (or covers the whole
2827		 * first bvec), just use iov_iter_advance(). This makes it easier
2828		 * since we can just skip the first segment, which may not
2829		 * be PAGE_SIZE aligned.
2830		 */
2831		const struct bio_vec *bvec = imu->bvec;
2832
2833		if (offset <= bvec->bv_len) {
2834			iov_iter_advance(iter, offset);
2835		} else {
2836			unsigned long seg_skip;
2837
2838			/* skip first vec */
2839			offset -= bvec->bv_len;
2840			seg_skip = 1 + (offset >> PAGE_SHIFT);
2841
2842			iter->bvec = bvec + seg_skip;
2843			iter->nr_segs -= seg_skip;
2844			iter->count -= bvec->bv_len + offset;
2845			iter->iov_offset = offset & ~PAGE_MASK;
2846		}
2847	}
2848
2849	return 0;
2850}
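
/*
 * Worked example of the bvec skip above, assuming 4K pages: say the first
 * bvec holds 1024 bytes and the request starts at offset 9300 into the
 * registered buffer. Then:
 *
 *	offset -= bvec->bv_len;			offset = 9300 - 1024 = 8276
 *	seg_skip = 1 + (offset >> PAGE_SHIFT);	seg_skip = 1 + 2 = 3
 *	iter->iov_offset = offset & ~PAGE_MASK;	iov_offset = 8276 % 4096 = 84
 *
 * i.e. skip the short head bvec plus two full pages, and start 84 bytes
 * into the fourth bvec.
 */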
2851
2852static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
2853{
2854	struct io_ring_ctx *ctx = req->ctx;
2855	struct io_mapped_ubuf *imu = req->imu;
2856	u16 index, buf_index = req->buf_index;
2857
2858	if (likely(!imu)) {
2859		if (unlikely(buf_index >= ctx->nr_user_bufs))
2860			return -EFAULT;
2861		index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2862		imu = READ_ONCE(ctx->user_bufs[index]);
2863		req->imu = imu;
2864	}
2865	return __io_import_fixed(req, rw, iter, imu);
2866}
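
/*
 * The userspace counterpart (liburing-style sketch, illustrative only;
 * 'ring', 'fd', 'buf' and 'len' are assumed): buffers are registered once
 * up front, and READ_FIXED/WRITE_FIXED SQEs then reference them by index,
 * which is the buf_index consumed above:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *	io_uring_register_buffers(&ring, &iov, 1);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read_fixed(sqe, fd, buf, len, 0, 0);
 *
 * The final argument is the registered buffer index (0 here).
 */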
2867
2868static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2869{
2870	if (needs_lock)
2871		mutex_unlock(&ctx->uring_lock);
2872}
2873
2874static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2875{
2876	/*
2877	 * "Normal" inline submissions always hold the uring_lock, since we
2878	 * grab it from the system call. Same is true for the SQPOLL offload.
2879	 * The only exception is when we've detached the request and issue it
2880	 * from an async worker thread; grab the lock for that case.
2881	 */
2882	if (needs_lock)
2883		mutex_lock(&ctx->uring_lock);
2884}
2885
2886static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2887					  int bgid, struct io_buffer *kbuf,
2888					  bool needs_lock)
2889{
2890	struct io_buffer *head;
2891
2892	if (req->flags & REQ_F_BUFFER_SELECTED)
2893		return kbuf;
2894
2895	io_ring_submit_lock(req->ctx, needs_lock);
2896
2897	lockdep_assert_held(&req->ctx->uring_lock);
2898
2899	head = xa_load(&req->ctx->io_buffers, bgid);
2900	if (head) {
2901		if (!list_empty(&head->list)) {
2902			kbuf = list_last_entry(&head->list, struct io_buffer,
2903							list);
2904			list_del(&kbuf->list);
2905		} else {
2906			kbuf = head;
2907			xa_erase(&req->ctx->io_buffers, bgid);
2908		}
2909		if (*len > kbuf->len)
2910			*len = kbuf->len;
2911	} else {
2912		kbuf = ERR_PTR(-ENOBUFS);
2913	}
2914
2915	io_ring_submit_unlock(req->ctx, needs_lock);
2916
2917	return kbuf;
2918}
2919
2920static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2921					bool needs_lock)
2922{
2923	struct io_buffer *kbuf;
2924	u16 bgid;
2925
2926	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2927	bgid = req->buf_index;
2928	kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2929	if (IS_ERR(kbuf))
2930		return kbuf;
2931	req->rw.addr = (u64) (unsigned long) kbuf;
2932	req->flags |= REQ_F_BUFFER_SELECTED;
2933	return u64_to_user_ptr(kbuf->addr);
2934}
2935
2936#ifdef CONFIG_COMPAT
2937static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2938				bool needs_lock)
2939{
2940	struct compat_iovec __user *uiov;
2941	compat_ssize_t clen;
2942	void __user *buf;
2943	ssize_t len;
2944
2945	uiov = u64_to_user_ptr(req->rw.addr);
2946	if (!access_ok(uiov, sizeof(*uiov)))
2947		return -EFAULT;
2948	if (__get_user(clen, &uiov->iov_len))
2949		return -EFAULT;
2950	if (clen < 0)
2951		return -EINVAL;
2952
2953	len = clen;
2954	buf = io_rw_buffer_select(req, &len, needs_lock);
2955	if (IS_ERR(buf))
2956		return PTR_ERR(buf);
2957	iov[0].iov_base = buf;
2958	iov[0].iov_len = (compat_size_t) len;
2959	return 0;
2960}
2961#endif
2962
2963static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2964				      bool needs_lock)
2965{
2966	struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2967	void __user *buf;
2968	ssize_t len;
2969
2970	if (copy_from_user(iov, uiov, sizeof(*uiov)))
2971		return -EFAULT;
2972
2973	len = iov[0].iov_len;
2974	if (len < 0)
2975		return -EINVAL;
2976	buf = io_rw_buffer_select(req, &len, needs_lock);
2977	if (IS_ERR(buf))
2978		return PTR_ERR(buf);
2979	iov[0].iov_base = buf;
2980	iov[0].iov_len = len;
2981	return 0;
2982}
2983
2984static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2985				    bool needs_lock)
2986{
2987	if (req->flags & REQ_F_BUFFER_SELECTED) {
2988		struct io_buffer *kbuf;
2989
2990		kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2991		iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
2992		iov[0].iov_len = kbuf->len;
2993		return 0;
2994	}
2995	if (req->rw.len != 1)
2996		return -EINVAL;
2997
2998#ifdef CONFIG_COMPAT
2999	if (req->ctx->compat)
3000		return io_compat_import(req, iov, needs_lock);
3001#endif
3002
3003	return __io_iov_buffer_select(req, iov, needs_lock);
3004}
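
/*
 * The selection logic above pairs with IORING_OP_PROVIDE_BUFFERS plus
 * IOSQE_BUFFER_SELECT in userspace. A hedged liburing-style sketch
 * ('ring', 'pool', 'sock' and 'len' are assumed; group id 1, eight
 * buffers of len bytes each, buffer ids starting at 0). If the CQE has
 * IORING_CQE_F_BUFFER set, the chosen buffer id sits in the high bits
 * of cqe->flags:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_provide_buffers(sqe, pool, len, 8, 1, 0);
 *	io_uring_submit(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv(sqe, sock, NULL, len, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 1;
 *	io_uring_submit(&ring);
 *
 *	bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */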
3005
3006static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3007			   struct iov_iter *iter, bool needs_lock)
3008{
3009	void __user *buf = u64_to_user_ptr(req->rw.addr);
3010	size_t sqe_len = req->rw.len;
3011	u8 opcode = req->opcode;
3012	ssize_t ret;
3013
3014	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
3015		*iovec = NULL;
3016		return io_import_fixed(req, rw, iter);
3017	}
3018
3019	/* buffer index only valid with fixed read/write, or buffer select */
3020	if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
3021		return -EINVAL;
3022
3023	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
3024		if (req->flags & REQ_F_BUFFER_SELECT) {
3025			buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
3026			if (IS_ERR(buf))
3027				return PTR_ERR(buf);
3028			req->rw.len = sqe_len;
3029		}
3030
3031		ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3032		*iovec = NULL;
3033		return ret;
3034	}
3035
3036	if (req->flags & REQ_F_BUFFER_SELECT) {
3037		ret = io_iov_buffer_select(req, *iovec, needs_lock);
3038		if (!ret)
3039			iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
3040		*iovec = NULL;
3041		return ret;
3042	}
3043
3044	return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3045			      req->ctx->compat);
3046}
3047
3048static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3049{
3050	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
3051}
3052
3053/*
3054 * For files that don't have ->read_iter() and ->write_iter(), handle them
3055 * by looping over ->read() or ->write() manually.
3056 */
3057static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
3058{
3059	struct kiocb *kiocb = &req->rw.kiocb;
3060	struct file *file = req->file;
3061	ssize_t ret = 0;
3062
3063	/*
3064	 * We don't support polled IO through this interface, and we can't
3065	 * support non-blocking either. For the latter, this just causes
3066	 * the kiocb to be handled from an async context.
3067	 */
3068	if (kiocb->ki_flags & IOCB_HIPRI)
3069		return -EOPNOTSUPP;
3070	if (kiocb->ki_flags & IOCB_NOWAIT)
3071		return -EAGAIN;
3072
3073	while (iov_iter_count(iter)) {
3074		struct iovec iovec;
3075		ssize_t nr;
3076
3077		if (!iov_iter_is_bvec(iter)) {
3078			iovec = iov_iter_iovec(iter);
3079		} else {
3080			iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3081			iovec.iov_len = req->rw.len;
3082		}
3083
3084		if (rw == READ) {
3085			nr = file->f_op->read(file, iovec.iov_base,
3086					      iovec.iov_len, io_kiocb_ppos(kiocb));
3087		} else {
3088			nr = file->f_op->write(file, iovec.iov_base,
3089					       iovec.iov_len, io_kiocb_ppos(kiocb));
3090		}
3091
3092		if (nr < 0) {
3093			if (!ret)
3094				ret = nr;
3095			break;
3096		}
3097		ret += nr;
3098		if (nr != iovec.iov_len)
3099			break;
3100		req->rw.len -= nr;
3101		req->rw.addr += nr;
3102		iov_iter_advance(iter, nr);
3103	}
3104
3105	return ret;
3106}
3107
3108static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3109			  const struct iovec *fast_iov, struct iov_iter *iter)
3110{
3111	struct io_async_rw *rw = req->async_data;
3112
3113	memcpy(&rw->iter, iter, sizeof(*iter));
3114	rw->free_iovec = iovec;
3115	rw->bytes_done = 0;
3116	/* can only be fixed buffers, no need to do anything */
3117	if (iov_iter_is_bvec(iter))
3118		return;
3119	if (!iovec) {
3120		unsigned iov_off = 0;
3121
3122		rw->iter.iov = rw->fast_iov;
3123		if (iter->iov != fast_iov) {
3124			iov_off = iter->iov - fast_iov;
3125			rw->iter.iov += iov_off;
3126		}
3127		if (rw->fast_iov != fast_iov)
3128			memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
3129			       sizeof(struct iovec) * iter->nr_segs);
3130	} else {
3131		req->flags |= REQ_F_NEED_CLEANUP;
3132	}
3133}
3134
3135static inline int io_alloc_async_data(struct io_kiocb *req)
3136{
3137	WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3138	req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3139	return req->async_data == NULL;
3140}
3141
3142static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3143			     const struct iovec *fast_iov,
3144			     struct iov_iter *iter, bool force)
3145{
3146	if (!force && !io_op_defs[req->opcode].needs_async_setup)
3147		return 0;
3148	if (!req->async_data) {
3149		if (io_alloc_async_data(req)) {
3150			kfree(iovec);
3151			return -ENOMEM;
3152		}
3153
3154		io_req_map_rw(req, iovec, fast_iov, iter);
3155	}
3156	return 0;
3157}
3158
3159static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
3160{
3161	struct io_async_rw *iorw = req->async_data;
3162	struct iovec *iov = iorw->fast_iov;
3163	int ret;
3164
3165	ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
3166	if (unlikely(ret < 0))
3167		return ret;
3168
3169	iorw->bytes_done = 0;
3170	iorw->free_iovec = iov;
3171	if (iov)
3172		req->flags |= REQ_F_NEED_CLEANUP;
3173	return 0;
3174}
3175
3176static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3177{
3178	if (unlikely(!(req->file->f_mode & FMODE_READ)))
3179		return -EBADF;
3180	return io_prep_rw(req, sqe);
3181}
3182
3183/*
3184 * This is our waitqueue callback handler, registered through lock_page_async()
3185 * when our initial attempt to do the IO with the iocb armed our waitqueue.
3186 * This gets called when the page is unlocked, and we generally expect that to
3187 * happen when the page IO is completed and the page is now uptodate. This will
3188 * queue a task_work based retry of the operation, attempting to copy the data
3189 * again. If the latter fails because the page was NOT uptodate, then we will
3190 * do a thread based blocking retry of the operation. That's the unexpected
3191 * slow path.
3192 */
3193static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3194			     int sync, void *arg)
3195{
3196	struct wait_page_queue *wpq;
3197	struct io_kiocb *req = wait->private;
3198	struct wait_page_key *key = arg;
3199
3200	wpq = container_of(wait, struct wait_page_queue, wait);
3201
3202	if (!wake_page_match(wpq, key))
3203		return 0;
3204
3205	req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
3206	list_del_init(&wait->entry);
3207
3208	/* submit ref gets dropped, acquire a new one */
3209	req_ref_get(req);
3210	io_req_task_queue(req);
3211	return 1;
3212}
3213
3214/*
3215 * This controls whether a given IO request should be armed for async page
3216 * based retry. If we return false here, the request is handed to the async
3217 * worker threads for retry. If we're doing buffered reads on a regular file,
3218 * we prepare a private wait_page_queue entry and retry the operation. This
3219 * will either succeed because the page is now uptodate and unlocked, or it
3220 * will register a callback when the page is unlocked at IO completion. Through
3221 * that callback, io_uring uses task_work to setup a retry of the operation.
3222 * That retry will attempt the buffered read again. The retry will generally
3223 * succeed, or in rare cases where it fails, we then fall back to using the
3224 * async worker threads for a blocking retry.
3225 */
3226static bool io_rw_should_retry(struct io_kiocb *req)
3227{
3228	struct io_async_rw *rw = req->async_data;
3229	struct wait_page_queue *wait = &rw->wpq;
3230	struct kiocb *kiocb = &req->rw.kiocb;
3231
3232	/* never retry for NOWAIT, we just complete with -EAGAIN */
3233	if (req->flags & REQ_F_NOWAIT)
3234		return false;
3235
3236	/* Only for buffered IO */
3237	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
3238		return false;
3239
3240	/*
3241	 * Just use poll if we can, and don't attempt if the fs doesn't
3242	 * support callback-based unlocks.
3243	 */
3244	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3245		return false;
3246
3247	wait->wait.func = io_async_buf_func;
3248	wait->wait.private = req;
3249	wait->wait.flags = 0;
3250	INIT_LIST_HEAD(&wait->wait.entry);
3251	kiocb->ki_flags |= IOCB_WAITQ;
3252	kiocb->ki_flags &= ~IOCB_NOWAIT;
3253	kiocb->ki_waitq = wait;
3254	return true;
3255}
3256
3257static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3258{
3259	if (req->file->f_op->read_iter)
3260		return call_read_iter(req->file, &req->rw.kiocb, iter);
3261	else if (req->file->f_op->read)
3262		return loop_rw_iter(READ, req, iter);
3263	else
3264		return -EINVAL;
3265}
3266
3267static int io_read(struct io_kiocb *req, unsigned int issue_flags)
3268{
3269	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
3270	struct kiocb *kiocb = &req->rw.kiocb;
3271	struct iov_iter __iter, *iter = &__iter;
3272	struct io_async_rw *rw = req->async_data;
3273	ssize_t io_size, ret, ret2;
3274	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3275
3276	if (rw) {
3277		iter = &rw->iter;
3278		iovec = NULL;
3279	} else {
3280		ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3281		if (ret < 0)
3282			return ret;
3283	}
3284	io_size = iov_iter_count(iter);
3285	req->result = io_size;
3286
3287	/* Ensure we clear previously set non-block flag */
3288	if (!force_nonblock)
3289		kiocb->ki_flags &= ~IOCB_NOWAIT;
3290	else
3291		kiocb->ki_flags |= IOCB_NOWAIT;
3292
3293	/* If the file doesn't support async, just async punt */
3294	if (force_nonblock && !io_file_supports_nowait(req, READ)) {
3295		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3296		return ret ?: -EAGAIN;
3297	}
3298
3299	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
3300	if (unlikely(ret)) {
3301		kfree(iovec);
3302		return ret;
3303	}
3304
3305	ret = io_iter_do_read(req, iter);
3306
3307	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
3308		req->flags &= ~REQ_F_REISSUE;
3309		/* IOPOLL retry should happen for io-wq threads */
3310		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
3311			goto done;
3312		/* no retry on NONBLOCK or RWF_NOWAIT */
3313		if (req->flags & REQ_F_NOWAIT)
3314			goto done;
3315		/* some cases will consume bytes even on error returns */
3316		iov_iter_revert(iter, io_size - iov_iter_count(iter));
3317		ret = 0;
3318	} else if (ret == -EIOCBQUEUED) {
3319		goto out_free;
3320	} else if (ret <= 0 || ret == io_size || !force_nonblock ||
3321		   (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
3322		/* read all, failed, already did sync or don't want to retry */
3323		goto done;
3324	}
3325
3326	ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3327	if (ret2)
3328		return ret2;
3329
3330	iovec = NULL;
3331	rw = req->async_data;
3332	/* now use our persistent iterator, if we aren't already */
3333	iter = &rw->iter;
3334
3335	do {
3336		io_size -= ret;
3337		rw->bytes_done += ret;
3338		/* if we can retry, do so with the callbacks armed */
3339		if (!io_rw_should_retry(req)) {
3340			kiocb->ki_flags &= ~IOCB_WAITQ;
3341			return -EAGAIN;
3342		}
3343
3344		/*
3345		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3346		 * we get -EIOCBQUEUED, then we'll get a notification when the
3347		 * desired page gets unlocked. We can also get a partial read
3348		 * here, and if we do, then just retry at the new offset.
3349		 */
3350		ret = io_iter_do_read(req, iter);
3351		if (ret == -EIOCBQUEUED)
3352			return 0;
3353		/* we got some bytes, but not all. retry. */
3354		kiocb->ki_flags &= ~IOCB_WAITQ;
3355	} while (ret > 0 && ret < io_size);
3356done:
3357	kiocb_done(kiocb, ret, issue_flags);
3358out_free:
3359	/* it's faster to check here than to delegate to kfree */
3360	if (iovec)
3361		kfree(iovec);
3362	return 0;
3363}
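
/*
 * One consequence of the REQ_F_NOWAIT early-outs above: a read issued with
 * RWF_NOWAIT never enters the IOCB_WAITQ retry loop; it completes with
 * whatever the first non-blocking attempt returned (possibly -EAGAIN).
 * Hedged liburing-style sketch; rw_flags takes the same RWF_* values as
 * preadv2(2):
 *
 *	io_uring_prep_read(sqe, fd, buf, len, 0);
 *	sqe->rw_flags = RWF_NOWAIT;
 */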
3364
3365static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3366{
3367	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3368		return -EBADF;
3369	return io_prep_rw(req, sqe);
3370}
3371
3372static int io_write(struct io_kiocb *req, unsigned int issue_flags)
3373{
3374	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
3375	struct kiocb *kiocb = &req->rw.kiocb;
3376	struct iov_iter __iter, *iter = &__iter;
3377	struct io_async_rw *rw = req->async_data;
3378	ssize_t ret, ret2, io_size;
3379	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3380
3381	if (rw) {
3382		iter = &rw->iter;
3383		iovec = NULL;
3384	} else {
3385		ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3386		if (ret < 0)
3387			return ret;
3388	}
3389	io_size = iov_iter_count(iter);
3390	req->result = io_size;
3391
3392	/* Ensure we clear previously set non-block flag */
3393	if (!force_nonblock)
3394		kiocb->ki_flags &= ~IOCB_NOWAIT;
3395	else
3396		kiocb->ki_flags |= IOCB_NOWAIT;
3397
3398	/* If the file doesn't support async, just async punt */
3399	if (force_nonblock && !io_file_supports_nowait(req, WRITE))
3400		goto copy_iov;
3401
3402	/* file path doesn't support NOWAIT for non-direct IO */
3403	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3404	    (req->flags & REQ_F_ISREG))
3405		goto copy_iov;
3406
3407	ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
3408	if (unlikely(ret))
3409		goto out_free;
3410
3411	/*
3412	 * Open-code file_start_write here to grab freeze protection,
3413	 * which will be released by another thread in
3414	 * io_complete_rw().  Fool lockdep by telling it the lock got
3415	 * released so that it doesn't complain about the held lock when
3416	 * we return to userspace.
3417	 */
3418	if (req->flags & REQ_F_ISREG) {
3419		sb_start_write(file_inode(req->file)->i_sb);
3420		__sb_writers_release(file_inode(req->file)->i_sb,
3421					SB_FREEZE_WRITE);
3422	}
3423	kiocb->ki_flags |= IOCB_WRITE;
3424
3425	if (req->file->f_op->write_iter)
3426		ret2 = call_write_iter(req->file, kiocb, iter);
3427	else if (req->file->f_op->write)
3428		ret2 = loop_rw_iter(WRITE, req, iter);
3429	else
3430		ret2 = -EINVAL;
3431
3432	if (req->flags & REQ_F_REISSUE) {
3433		req->flags &= ~REQ_F_REISSUE;
3434		ret2 = -EAGAIN;
3435	}
3436
3437	/*
3438	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3439	 * retry them without IOCB_NOWAIT.
3440	 */
3441	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3442		ret2 = -EAGAIN;
3443	/* no retry on NONBLOCK or RWF_NOWAIT */
3444	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
3445		goto done;
3446	if (!force_nonblock || ret2 != -EAGAIN) {
3447		/* IOPOLL retry should happen for io-wq threads */
3448		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3449			goto copy_iov;
3450done:
3451		kiocb_done(kiocb, ret2, issue_flags);
3452	} else {
3453copy_iov:
3454		/* some cases will consume bytes even on error returns */
3455		iov_iter_revert(iter, io_size - iov_iter_count(iter));
3456		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
3457		return ret ?: -EAGAIN;
3458	}
3459out_free:
3460	/* it's reportedly faster than delegating the null check to kfree() */
3461	if (iovec)
3462		kfree(iovec);
3463	return ret;
3464}
3465
3466static int io_renameat_prep(struct io_kiocb *req,
3467			    const struct io_uring_sqe *sqe)
3468{
3469	struct io_rename *ren = &req->rename;
3470	const char __user *oldf, *newf;
3471
3472	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3473		return -EINVAL;
3474	if (sqe->ioprio || sqe->buf_index)
3475		return -EINVAL;
3476	if (unlikely(req->flags & REQ_F_FIXED_FILE))
3477		return -EBADF;
3478
3479	ren->old_dfd = READ_ONCE(sqe->fd);
3480	oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3481	newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3482	ren->new_dfd = READ_ONCE(sqe->len);
3483	ren->flags = READ_ONCE(sqe->rename_flags);
3484
3485	ren->oldpath = getname(oldf);
3486	if (IS_ERR(ren->oldpath))
3487		return PTR_ERR(ren->oldpath);
3488
3489	ren->newpath = getname(newf);
3490	if (IS_ERR(ren->newpath)) {
3491		putname(ren->oldpath);
3492		return PTR_ERR(ren->newpath);
3493	}
3494
3495	req->flags |= REQ_F_NEED_CLEANUP;
3496	return 0;
3497}
3498
3499static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
3500{
3501	struct io_rename *ren = &req->rename;
3502	int ret;
3503
3504	if (issue_flags & IO_URING_F_NONBLOCK)
3505		return -EAGAIN;
3506
3507	ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3508				ren->newpath, ren->flags);
3509
3510	req->flags &= ~REQ_F_NEED_CLEANUP;
3511	if (ret < 0)
3512		req_set_fail(req);
3513	io_req_complete(req, ret);
3514	return 0;
3515}
3516
3517static int io_unlinkat_prep(struct io_kiocb *req,
3518			    const struct io_uring_sqe *sqe)
3519{
3520	struct io_unlink *un = &req->unlink;
3521	const char __user *fname;
3522
3523	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3524		return -EINVAL;
3525	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
3526		return -EINVAL;
3527	if (unlikely(req->flags & REQ_F_FIXED_FILE))
3528		return -EBADF;
3529
3530	un->dfd = READ_ONCE(sqe->fd);
3531
3532	un->flags = READ_ONCE(sqe->unlink_flags);
3533	if (un->flags & ~AT_REMOVEDIR)
3534		return -EINVAL;
3535
3536	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3537	un->filename = getname(fname);
3538	if (IS_ERR(un->filename))
3539		return PTR_ERR(un->filename);
3540
3541	req->flags |= REQ_F_NEED_CLEANUP;
3542	return 0;
3543}
3544
3545static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
3546{
3547	struct io_unlink *un = &req->unlink;
3548	int ret;
3549
3550	if (issue_flags & IO_URING_F_NONBLOCK)
3551		return -EAGAIN;
3552
3553	if (un->flags & AT_REMOVEDIR)
3554		ret = do_rmdir(un->dfd, un->filename);
3555	else
3556		ret = do_unlinkat(un->dfd, un->filename);
3557
3558	req->flags &= ~REQ_F_NEED_CLEANUP;
3559	if (ret < 0)
3560		req_set_fail(req);
3561	io_req_complete(req, ret);
3562	return 0;
3563}
3564
3565static int io_shutdown_prep(struct io_kiocb *req,
3566			    const struct io_uring_sqe *sqe)
3567{
3568#if defined(CONFIG_NET)
3569	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3570		return -EINVAL;
3571	if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3572	    sqe->buf_index)
3573		return -EINVAL;
3574
3575	req->shutdown.how = READ_ONCE(sqe->len);
3576	return 0;
3577#else
3578	return -EOPNOTSUPP;
3579#endif
3580}
3581
3582static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
3583{
3584#if defined(CONFIG_NET)
3585	struct socket *sock;
3586	int ret;
3587
3588	if (issue_flags & IO_URING_F_NONBLOCK)
3589		return -EAGAIN;
3590
3591	sock = sock_from_file(req->file);
3592	if (unlikely(!sock))
3593		return -ENOTSOCK;
3594
3595	ret = __sys_shutdown_sock(sock, req->shutdown.how);
3596	if (ret < 0)
3597		req_set_fail(req);
3598	io_req_complete(req, ret);
3599	return 0;
3600#else
3601	return -EOPNOTSUPP;
3602#endif
3603}
3604
3605static int __io_splice_prep(struct io_kiocb *req,
3606			    const struct io_uring_sqe *sqe)
3607{
3608	struct io_splice *sp = &req->splice;
3609	unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
3610
3611	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3612		return -EINVAL;
3613
3614	sp->file_in = NULL;
3615	sp->len = READ_ONCE(sqe->len);
3616	sp->flags = READ_ONCE(sqe->splice_flags);
3617
3618	if (unlikely(sp->flags & ~valid_flags))
3619		return -EINVAL;
3620
3621	sp->file_in = io_file_get(req->ctx, NULL, req,
3622				  READ_ONCE(sqe->splice_fd_in),
3623				  (sp->flags & SPLICE_F_FD_IN_FIXED));
3624	if (!sp->file_in)
3625		return -EBADF;
3626	req->flags |= REQ_F_NEED_CLEANUP;
3627	return 0;
3628}
3629
3630static int io_tee_prep(struct io_kiocb *req,
3631		       const struct io_uring_sqe *sqe)
3632{
3633	if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3634		return -EINVAL;
3635	return __io_splice_prep(req, sqe);
3636}
3637
3638static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
3639{
3640	struct io_splice *sp = &req->splice;
3641	struct file *in = sp->file_in;
3642	struct file *out = sp->file_out;
3643	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3644	long ret = 0;
3645
3646	if (issue_flags & IO_URING_F_NONBLOCK)
3647		return -EAGAIN;
3648	if (sp->len)
3649		ret = do_tee(in, out, sp->len, flags);
3650
3651	if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3652		io_put_file(in);
3653	req->flags &= ~REQ_F_NEED_CLEANUP;
3654
3655	if (ret != sp->len)
3656		req_set_fail(req);
3657	io_req_complete(req, ret);
3658	return 0;
3659}
3660
3661static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3662{
3663	struct io_splice *sp = &req->splice;
3664
3665	sp->off_in = READ_ONCE(sqe->splice_off_in);
3666	sp->off_out = READ_ONCE(sqe->off);
3667	return __io_splice_prep(req, sqe);
3668}
3669
3670static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
3671{
3672	struct io_splice *sp = &req->splice;
3673	struct file *in = sp->file_in;
3674	struct file *out = sp->file_out;
3675	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3676	loff_t *poff_in, *poff_out;
3677	long ret = 0;
3678
3679	if (issue_flags & IO_URING_F_NONBLOCK)
3680		return -EAGAIN;
3681
3682	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3683	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
3684
3685	if (sp->len)
3686		ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
3687
3688	if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3689		io_put_file(in);
3690	req->flags &= ~REQ_F_NEED_CLEANUP;
3691
3692	if (ret != sp->len)
3693		req_set_fail(req);
3694	io_req_complete(req, ret);
3695	return 0;
3696}
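
/*
 * Userspace sketch of the splice op (liburing-style, illustrative only;
 * 'ring', 'pipefd', 'outfd' and 'len' are assumed): move len bytes from a
 * pipe to a file at offset 0. An offset of -1 selects the NULL loff_t
 * pointer above, i.e. the fd's own file position:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_splice(sqe, pipefd[0], -1, outfd, 0, len, 0);
 *	io_uring_submit(&ring);
 */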
3697
3698/*
3699 * IORING_OP_NOP just posts a completion event, nothing else.
3700 */
3701static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
3702{
3703	struct io_ring_ctx *ctx = req->ctx;
3704
3705	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3706		return -EINVAL;
3707
3708	__io_req_complete(req, issue_flags, 0, 0);
3709	return 0;
3710}
3711
3712static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3713{
3714	struct io_ring_ctx *ctx = req->ctx;
3715
3716	if (!req->file)
3717		return -EBADF;
3718
3719	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3720		return -EINVAL;
3721	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
3722		return -EINVAL;
3723
3724	req->sync.flags = READ_ONCE(sqe->fsync_flags);
3725	if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3726		return -EINVAL;
3727
3728	req->sync.off = READ_ONCE(sqe->off);
3729	req->sync.len = READ_ONCE(sqe->len);
3730	return 0;
3731}
3732
3733static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
3734{
3735	loff_t end = req->sync.off + req->sync.len;
3736	int ret;
3737
3738	/* fsync always requires a blocking context */
3739	if (issue_flags & IO_URING_F_NONBLOCK)
3740		return -EAGAIN;
3741
3742	ret = vfs_fsync_range(req->file, req->sync.off,
3743				end > 0 ? end : LLONG_MAX,
3744				req->sync.flags & IORING_FSYNC_DATASYNC);
3745	if (ret < 0)
3746		req_set_fail(req);
3747	io_req_complete(req, ret);
3748	return 0;
3749}
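
/*
 * Since fsync is always punted to a blocking context, a common pattern is
 * to link it behind a write so the ordering is enforced in the kernel
 * rather than by waiting in userspace. Hedged liburing-style sketch
 * ('ring', 'fd', 'buf' and 'len' assumed; write then fdatasync as one
 * IOSQE_IO_LINK chain):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_write(sqe, fd, buf, len, 0);
 *	sqe->flags |= IOSQE_IO_LINK;
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 *	io_uring_submit(&ring);
 */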
3750
3751static int io_fallocate_prep(struct io_kiocb *req,
3752			     const struct io_uring_sqe *sqe)
3753{
3754	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3755		return -EINVAL;
3756	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3757		return -EINVAL;
3758
3759	req->sync.off = READ_ONCE(sqe->off);
3760	req->sync.len = READ_ONCE(sqe->addr);
3761	req->sync.mode = READ_ONCE(sqe->len);
3762	return 0;
3763}
3764
3765static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
3766{
3767	int ret;
3768
3769	/* fallocate always requires a blocking context */
3770	if (issue_flags & IO_URING_F_NONBLOCK)
3771		return -EAGAIN;
3772	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3773				req->sync.len);
3774	if (ret < 0)
3775		req_set_fail(req);
3776	io_req_complete(req, ret);
3777	return 0;
3778}
3779
3780static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3781{
3782	const char __user *fname;
3783	int ret;
3784
3785	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3786		return -EINVAL;
3787	if (unlikely(sqe->ioprio || sqe->buf_index))
3788		return -EINVAL;
3789	if (unlikely(req->flags & REQ_F_FIXED_FILE))
3790		return -EBADF;
3791
3792	/* open.how should already be initialised */
3793	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
3794		req->open.how.flags |= O_LARGEFILE;
3795
3796	req->open.dfd = READ_ONCE(sqe->fd);
3797	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3798	req->open.filename = getname(fname);
3799	if (IS_ERR(req->open.filename)) {
3800		ret = PTR_ERR(req->open.filename);
3801		req->open.filename = NULL;
3802		return ret;
3803	}
3804	req->open.nofile = rlimit(RLIMIT_NOFILE);
3805	req->flags |= REQ_F_NEED_CLEANUP;
3806	return 0;
3807}
3808
3809static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3810{
3811	u64 mode = READ_ONCE(sqe->len);
3812	u64 flags = READ_ONCE(sqe->open_flags);
3813
3814	req->open.how = build_open_how(flags, mode);
3815	return __io_openat_prep(req, sqe);
3816}
3817
3818static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3819{
3820	struct open_how __user *how;
3821	size_t len;
3822	int ret;
3823
3824	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3825	len = READ_ONCE(sqe->len);
3826	if (len < OPEN_HOW_SIZE_VER0)
3827		return -EINVAL;
3828
3829	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3830					len);
3831	if (ret)
3832		return ret;
3833
3834	return __io_openat_prep(req, sqe);
3835}
3836
3837static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
3838{
3839	struct open_flags op;
3840	struct file *file;
3841	bool nonblock_set;
3842	bool resolve_nonblock;
3843	int ret;
3844
3845	ret = build_open_flags(&req->open.how, &op);
3846	if (ret)
3847		goto err;
3848	nonblock_set = op.open_flag & O_NONBLOCK;
3849	resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
3850	if (issue_flags & IO_URING_F_NONBLOCK) {
3851		/*
3852		 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
3853		 * it'll always return -EAGAIN
3854		 */
3855		if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
3856			return -EAGAIN;
3857		op.lookup_flags |= LOOKUP_CACHED;
3858		op.open_flag |= O_NONBLOCK;
3859	}
3860
3861	ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
3862	if (ret < 0)
3863		goto err;
3864
3865	file = do_filp_open(req->open.dfd, req->open.filename, &op);
3866	if (IS_ERR(file)) {
3867		/*
3868		 * We could hang on to this 'fd' on retrying, but it seems like a
3869		 * marginal gain for something that is now known to be a slower
3870		 * path. So just put it, and we'll get a new one when we retry.
3871		 */
3872		put_unused_fd(ret);
3873
3874		ret = PTR_ERR(file);
3875		/* only retry if RESOLVE_CACHED wasn't already set by the application */
3876		if (ret == -EAGAIN &&
3877		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
3878			return -EAGAIN;
3879		goto err;
3880	}
3881
3882	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
3883		file->f_flags &= ~O_NONBLOCK;
3884	fsnotify_open(file);
3885	fd_install(ret, file);
3886err:
3887	putname(req->open.filename);
3888	req->flags &= ~REQ_F_NEED_CLEANUP;
3889	if (ret < 0)
3890		req_set_fail(req);
3891	__io_req_complete(req, issue_flags, ret, 0);
3892	return 0;
3893}
3894
3895static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
3896{
3897	return io_openat2(req, issue_flags);
3898}
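
/*
 * Illustrative userspace sketch, not part of this file: issuing openat2
 * through the ring. This assumes liburing helpers of roughly this shape;
 * the names below are for illustration only.
 *
 *	struct open_how how = { .flags = O_RDONLY, .resolve = RESOLVE_CACHED };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_openat2(sqe, AT_FDCWD, "file.txt", &how);
 *	io_uring_submit(&ring);
 *
 * Note how io_openat2() treats RESOLVE_CACHED: if the application asked
 * for it, a cached-lookup miss is not retried from a blocking context,
 * and the -EAGAIN is posted in the CQE instead.
 */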
3899
3900static int io_remove_buffers_prep(struct io_kiocb *req,
3901				  const struct io_uring_sqe *sqe)
3902{
3903	struct io_provide_buf *p = &req->pbuf;
3904	u64 tmp;
3905
3906	if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3907		return -EINVAL;
3908
3909	tmp = READ_ONCE(sqe->fd);
3910	if (!tmp || tmp > USHRT_MAX)
3911		return -EINVAL;
3912
3913	memset(p, 0, sizeof(*p));
3914	p->nbufs = tmp;
3915	p->bgid = READ_ONCE(sqe->buf_group);
3916	return 0;
3917}
3918
3919static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3920			       int bgid, unsigned nbufs)
3921{
3922	unsigned i = 0;
3923
3924	/* shouldn't happen */
3925	if (!nbufs)
3926		return 0;
3927
3928	/* the head kbuf is the list itself */
3929	while (!list_empty(&buf->list)) {
3930		struct io_buffer *nxt;
3931
3932		nxt = list_first_entry(&buf->list, struct io_buffer, list);
3933		list_del(&nxt->list);
3934		kfree(nxt);
3935		if (++i == nbufs)
3936			return i;
3937	}
3938	i++;
3939	kfree(buf);
3940	xa_erase(&ctx->io_buffers, bgid);
3941
3942	return i;
3943}
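
/*
 * Note on the layout handled above: a buffer group lives in
 * ctx->io_buffers keyed by bgid, with the xarray entry doubling as the
 * head io_buffer and all other buffers chained off head->list. That is
 * why freeing the head buffer also erases the xarray entry.
 */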
3944
3945static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
3946{
3947	struct io_provide_buf *p = &req->pbuf;
3948	struct io_ring_ctx *ctx = req->ctx;
3949	struct io_buffer *head;
3950	int ret = 0;
3951	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3952
3953	io_ring_submit_lock(ctx, !force_nonblock);
3954
3955	lockdep_assert_held(&ctx->uring_lock);
3956
3957	ret = -ENOENT;
3958	head = xa_load(&ctx->io_buffers, p->bgid);
3959	if (head)
3960		ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
3961	if (ret < 0)
3962		req_set_fail(req);
3963
3964	/* complete before unlock, IOPOLL may need the lock */
3965	__io_req_complete(req, issue_flags, ret, 0);
3966	io_ring_submit_unlock(ctx, !force_nonblock);
3967	return 0;
3968}
3969
3970static int io_provide_buffers_prep(struct io_kiocb *req,
3971				   const struct io_uring_sqe *sqe)
3972{
3973	unsigned long size, tmp_check;
3974	struct io_provide_buf *p = &req->pbuf;
3975	u64 tmp;
3976
3977	if (sqe->ioprio || sqe->rw_flags)
3978		return -EINVAL;
3979
3980	tmp = READ_ONCE(sqe->fd);
3981	if (!tmp || tmp > USHRT_MAX)
3982		return -E2BIG;
3983	p->nbufs = tmp;
3984	p->addr = READ_ONCE(sqe->addr);
3985	p->len = READ_ONCE(sqe->len);
3986
3987	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
3988				&size))
3989		return -EOVERFLOW;
3990	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
3991		return -EOVERFLOW;
3992
3993	size = (unsigned long)p->len * p->nbufs;
3994	if (!access_ok(u64_to_user_ptr(p->addr), size))
3995		return -EFAULT;
3996
3997	p->bgid = READ_ONCE(sqe->buf_group);
3998	tmp = READ_ONCE(sqe->off);
3999	if (tmp > USHRT_MAX)
4000		return -E2BIG;
4001	p->bid = tmp;
4002	return 0;
4003}
4004
4005static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4006{
4007	struct io_buffer *buf;
4008	u64 addr = pbuf->addr;
4009	int i, bid = pbuf->bid;
4010
4011	for (i = 0; i < pbuf->nbufs; i++) {
4012		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
4013		if (!buf)
4014			break;
4015
4016		buf->addr = addr;
4017		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
4018		buf->bid = bid;
4019		addr += pbuf->len;
4020		bid++;
4021		if (!*head) {
4022			INIT_LIST_HEAD(&buf->list);
4023			*head = buf;
4024		} else {
4025			list_add_tail(&buf->list, &(*head)->list);
4026		}
4027	}
4028
4029	return i ? i : -ENOMEM;
4030}
4031
4032static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
4033{
4034	struct io_provide_buf *p = &req->pbuf;
4035	struct io_ring_ctx *ctx = req->ctx;
4036	struct io_buffer *head, *list;
4037	int ret = 0;
4038	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4039
4040	io_ring_submit_lock(ctx, !force_nonblock);
4041
4042	lockdep_assert_held(&ctx->uring_lock);
4043
4044	list = head = xa_load(&ctx->io_buffers, p->bgid);
4045
4046	ret = io_add_buffers(p, &head);
4047	if (ret >= 0 && !list) {
4048		ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
4049		if (ret < 0)
4050			__io_remove_buffers(ctx, head, p->bgid, -1U);
4051	}
4052	if (ret < 0)
4053		req_set_fail(req);
4054	/* complete before unlock, IOPOLL may need the lock */
4055	__io_req_complete(req, issue_flags, ret, 0);
4056	io_ring_submit_unlock(ctx, !force_nonblock);
4057	return 0;
4058}
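
/*
 * Illustrative userspace sketch, not part of this file: publishing a
 * group of provided buffers that later requests can select from. This
 * assumes a liburing-style helper; sizes and ids are arbitrary.
 *
 *	io_uring_prep_provide_buffers(sqe, base, BUF_LEN, NR_BUFS, BGID, 0);
 *
 * That maps onto one io_provide_buffers() call: NR_BUFS buffers of
 * BUF_LEN bytes each, carved out of one contiguous allocation at base,
 * added to group BGID with buffer ids starting at 0.
 */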
4059
4060static int io_epoll_ctl_prep(struct io_kiocb *req,
4061			     const struct io_uring_sqe *sqe)
4062{
4063#if defined(CONFIG_EPOLL)
4064	if (sqe->ioprio || sqe->buf_index)
4065		return -EINVAL;
4066	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4067		return -EINVAL;
4068
4069	req->epoll.epfd = READ_ONCE(sqe->fd);
4070	req->epoll.op = READ_ONCE(sqe->len);
4071	req->epoll.fd = READ_ONCE(sqe->off);
4072
4073	if (ep_op_has_event(req->epoll.op)) {
4074		struct epoll_event __user *ev;
4075
4076		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4077		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4078			return -EFAULT;
4079	}
4080
4081	return 0;
4082#else
4083	return -EOPNOTSUPP;
4084#endif
4085}
4086
4087static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
4088{
4089#if defined(CONFIG_EPOLL)
4090	struct io_epoll *ie = &req->epoll;
4091	int ret;
4092	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4093
4094	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4095	if (force_nonblock && ret == -EAGAIN)
4096		return -EAGAIN;
4097
4098	if (ret < 0)
4099		req_set_fail(req);
4100	__io_req_complete(req, issue_flags, ret, 0);
4101	return 0;
4102#else
4103	return -EOPNOTSUPP;
4104#endif
4105}
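
/*
 * Illustrative userspace sketch, not part of this file: adding a file
 * descriptor to an epoll set via the ring, assuming a liburing-style
 * helper.
 *
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };
 *
 *	io_uring_prep_epoll_ctl(sqe, epfd, fd, EPOLL_CTL_ADD, &ev);
 *
 * io_epoll_ctl() first attempts the operation nonblocking; only an
 * -EAGAIN from do_epoll_ctl() causes a punt to the blocking context.
 */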
4106
4107static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4108{
4109#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4110	if (sqe->ioprio || sqe->buf_index || sqe->off)
4111		return -EINVAL;
4112	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4113		return -EINVAL;
4114
4115	req->madvise.addr = READ_ONCE(sqe->addr);
4116	req->madvise.len = READ_ONCE(sqe->len);
4117	req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4118	return 0;
4119#else
4120	return -EOPNOTSUPP;
4121#endif
4122}
4123
4124static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
4125{
4126#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4127	struct io_madvise *ma = &req->madvise;
4128	int ret;
4129
4130	if (issue_flags & IO_URING_F_NONBLOCK)
4131		return -EAGAIN;
4132
4133	ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
4134	if (ret < 0)
4135		req_set_fail(req);
4136	io_req_complete(req, ret);
4137	return 0;
4138#else
4139	return -EOPNOTSUPP;
4140#endif
4141}
4142
4143static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4144{
4145	if (sqe->ioprio || sqe->buf_index || sqe->addr)
4146		return -EINVAL;
4147	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4148		return -EINVAL;
4149
4150	req->fadvise.offset = READ_ONCE(sqe->off);
4151	req->fadvise.len = READ_ONCE(sqe->len);
4152	req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4153	return 0;
4154}
4155
4156static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
4157{
4158	struct io_fadvise *fa = &req->fadvise;
4159	int ret;
4160
4161	if (issue_flags & IO_URING_F_NONBLOCK) {
4162		switch (fa->advice) {
4163		case POSIX_FADV_NORMAL:
4164		case POSIX_FADV_RANDOM:
4165		case POSIX_FADV_SEQUENTIAL:
4166			break;
4167		default:
4168			return -EAGAIN;
4169		}
4170	}
4171
4172	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4173	if (ret < 0)
4174		req_set_fail(req);
4175	__io_req_complete(req, issue_flags, ret, 0);
4176	return 0;
4177}
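
/*
 * Only POSIX_FADV_NORMAL, RANDOM and SEQUENTIAL are serviced inline
 * above, since they merely adjust per-file readahead state. The other
 * advice values may touch the page cache or start IO, so they are
 * always punted to a blocking context.
 */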
4178
4179static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4180{
4181	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4182		return -EINVAL;
4183	if (sqe->ioprio || sqe->buf_index)
4184		return -EINVAL;
4185	if (req->flags & REQ_F_FIXED_FILE)
4186		return -EBADF;
4187
4188	req->statx.dfd = READ_ONCE(sqe->fd);
4189	req->statx.mask = READ_ONCE(sqe->len);
4190	req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
4191	req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4192	req->statx.flags = READ_ONCE(sqe->statx_flags);
4193
4194	return 0;
4195}
4196
4197static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
4198{
4199	struct io_statx *ctx = &req->statx;
4200	int ret;
4201
4202	if (issue_flags & IO_URING_F_NONBLOCK)
4203		return -EAGAIN;
4204
4205	ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4206		       ctx->buffer);
4207
4208	if (ret < 0)
4209		req_set_fail(req);
4210	io_req_complete(req, ret);
4211	return 0;
4212}
4213
4214static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4215{
4216	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4217		return -EINVAL;
4218	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4219	    sqe->rw_flags || sqe->buf_index)
4220		return -EINVAL;
4221	if (req->flags & REQ_F_FIXED_FILE)
4222		return -EBADF;
4223
4224	req->close.fd = READ_ONCE(sqe->fd);
4225	return 0;
4226}
4227
4228static int io_close(struct io_kiocb *req, unsigned int issue_flags)
4229{
4230	struct files_struct *files = current->files;
4231	struct io_close *close = &req->close;
4232	struct fdtable *fdt;
4233	struct file *file = NULL;
4234	int ret = -EBADF;
4235
4236	spin_lock(&files->file_lock);
4237	fdt = files_fdtable(files);
4238	if (close->fd >= fdt->max_fds) {
4239		spin_unlock(&files->file_lock);
4240		goto err;
4241	}
4242	file = fdt->fd[close->fd];
4243	if (!file || file->f_op == &io_uring_fops) {
4244		spin_unlock(&files->file_lock);
4245		file = NULL;
4246		goto err;
4247	}
4248
4249	/* if the file has a flush method, be safe and punt to async */
4250	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
4251		spin_unlock(&files->file_lock);
4252		return -EAGAIN;
4253	}
4254
4255	ret = __close_fd_get_file(close->fd, &file);
4256	spin_unlock(&files->file_lock);
4257	if (ret < 0) {
4258		if (ret == -ENOENT)
4259			ret = -EBADF;
4260		goto err;
4261	}
4262
4263	/* No ->flush() or already async, safely close from here */
4264	ret = filp_close(file, current->files);
4265err:
4266	if (ret < 0)
4267		req_set_fail(req);
4268	if (file)
4269		fput(file);
4270	__io_req_complete(req, issue_flags, ret, 0);
4271	return 0;
4272}
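
/*
 * The ->flush() check above mirrors what close(2) does: filp_close()
 * invokes ->flush(), and some filesystems use it to write back dirty
 * data, which can block. Hence io_close() only completes inline when
 * the file has no flush method, or once it is already running from the
 * blocking io-wq context.
 */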
4273
4274static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4275{
4276	struct io_ring_ctx *ctx = req->ctx;
4277
4278	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4279		return -EINVAL;
4280	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4281		return -EINVAL;
4282
4283	req->sync.off = READ_ONCE(sqe->off);
4284	req->sync.len = READ_ONCE(sqe->len);
4285	req->sync.flags = READ_ONCE(sqe->sync_range_flags);
4286	return 0;
4287}
4288
4289static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
4290{
4291	int ret;
4292
4293	/* sync_file_range always requires a blocking context */
4294	if (issue_flags & IO_URING_F_NONBLOCK)
4295		return -EAGAIN;
4296
4297	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
4298				req->sync.flags);
4299	if (ret < 0)
4300		req_set_fail(req);
4301	io_req_complete(req, ret);
4302	return 0;
4303}
4304
4305#if defined(CONFIG_NET)
4306static int io_setup_async_msg(struct io_kiocb *req,
4307			      struct io_async_msghdr *kmsg)
4308{
4309	struct io_async_msghdr *async_msg = req->async_data;
4310
4311	if (async_msg)
4312		return -EAGAIN;
4313	if (io_alloc_async_data(req)) {
4314		kfree(kmsg->free_iov);
4315		return -ENOMEM;
4316	}
4317	async_msg = req->async_data;
4318	req->flags |= REQ_F_NEED_CLEANUP;
4319	memcpy(async_msg, kmsg, sizeof(*kmsg));
4320	async_msg->msg.msg_name = &async_msg->addr;
4321	/* if we were using fast_iov, set it to the new one */
4322	if (!async_msg->free_iov)
4323		async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4324
4325	return -EAGAIN;
4326}
4327
4328static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4329			       struct io_async_msghdr *iomsg)
4330{
4331	iomsg->msg.msg_name = &iomsg->addr;
4332	iomsg->free_iov = iomsg->fast_iov;
4333	return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
4334				   req->sr_msg.msg_flags, &iomsg->free_iov);
4335}
4336
4337static int io_sendmsg_prep_async(struct io_kiocb *req)
4338{
4339	int ret;
4340
4341	ret = io_sendmsg_copy_hdr(req, req->async_data);
4342	if (!ret)
4343		req->flags |= REQ_F_NEED_CLEANUP;
4344	return ret;
4345}
4346
4347static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4348{
4349	struct io_sr_msg *sr = &req->sr_msg;
4350
4351	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4352		return -EINVAL;
4353
4354	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4355	sr->len = READ_ONCE(sqe->len);
4356	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4357	if (sr->msg_flags & MSG_DONTWAIT)
4358		req->flags |= REQ_F_NOWAIT;
4359
4360#ifdef CONFIG_COMPAT
4361	if (req->ctx->compat)
4362		sr->msg_flags |= MSG_CMSG_COMPAT;
4363#endif
4364	return 0;
4365}
4366
4367static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
4368{
4369	struct io_async_msghdr iomsg, *kmsg;
4370	struct socket *sock;
4371	unsigned flags;
4372	int min_ret = 0;
4373	int ret;
4374
4375	sock = sock_from_file(req->file);
4376	if (unlikely(!sock))
4377		return -ENOTSOCK;
4378
4379	kmsg = req->async_data;
4380	if (!kmsg) {
4381		ret = io_sendmsg_copy_hdr(req, &iomsg);
4382		if (ret)
4383			return ret;
4384		kmsg = &iomsg;
4385	}
4386
4387	flags = req->sr_msg.msg_flags;
4388	if (issue_flags & IO_URING_F_NONBLOCK)
4389		flags |= MSG_DONTWAIT;
4390	if (flags & MSG_WAITALL)
4391		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4392
4393	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
4394	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
4395		return io_setup_async_msg(req, kmsg);
4396	if (ret == -ERESTARTSYS)
4397		ret = -EINTR;
4398
4399	/* fast path, check for non-NULL to avoid function call */
4400	if (kmsg->free_iov)
4401		kfree(kmsg->free_iov);
4402	req->flags &= ~REQ_F_NEED_CLEANUP;
4403	if (ret < min_ret)
4404		req_set_fail(req);
4405	__io_req_complete(req, issue_flags, ret, 0);
4406	return 0;
4407}
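
/*
 * Illustrative userspace sketch, not part of this file: a sendmsg over
 * the ring, assuming liburing-style helpers.
 *
 *	struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };
 *
 *	io_uring_prep_sendmsg(sqe, sockfd, &msg, MSG_WAITALL);
 *
 * Per the MSG_WAITALL handling above, a short send then marks the
 * request failed (which breaks any link chain), while cqe->res still
 * carries the partial byte count.
 */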
4408
4409static int io_send(struct io_kiocb *req, unsigned int issue_flags)
4410{
4411	struct io_sr_msg *sr = &req->sr_msg;
4412	struct msghdr msg;
4413	struct iovec iov;
4414	struct socket *sock;
4415	unsigned flags;
4416	int min_ret = 0;
4417	int ret;
4418
4419	sock = sock_from_file(req->file);
4420	if (unlikely(!sock))
4421		return -ENOTSOCK;
4422
4423	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4424	if (unlikely(ret))
4425		return ret;
4426
4427	msg.msg_name = NULL;
4428	msg.msg_control = NULL;
4429	msg.msg_controllen = 0;
4430	msg.msg_namelen = 0;
4431
4432	flags = req->sr_msg.msg_flags;
4433	if (issue_flags & IO_URING_F_NONBLOCK)
4434		flags |= MSG_DONTWAIT;
4435	if (flags & MSG_WAITALL)
4436		min_ret = iov_iter_count(&msg.msg_iter);
4437
4438	msg.msg_flags = flags;
4439	ret = sock_sendmsg(sock, &msg);
4440	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
4441		return -EAGAIN;
4442	if (ret == -ERESTARTSYS)
4443		ret = -EINTR;
4444
4445	if (ret < min_ret)
4446		req_set_fail(req);
4447	__io_req_complete(req, issue_flags, ret, 0);
4448	return 0;
4449}
4450
4451static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4452				 struct io_async_msghdr *iomsg)
4453{
4454	struct io_sr_msg *sr = &req->sr_msg;
4455	struct iovec __user *uiov;
4456	size_t iov_len;
4457	int ret;
4458
4459	ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4460					&iomsg->uaddr, &uiov, &iov_len);
4461	if (ret)
4462		return ret;
4463
4464	if (req->flags & REQ_F_BUFFER_SELECT) {
4465		if (iov_len > 1)
4466			return -EINVAL;
4467		if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
4468			return -EFAULT;
4469		sr->len = iomsg->fast_iov[0].iov_len;
4470		iomsg->free_iov = NULL;
4471	} else {
4472		iomsg->free_iov = iomsg->fast_iov;
4473		ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
4474				     &iomsg->free_iov, &iomsg->msg.msg_iter,
4475				     false);
4476		if (ret > 0)
4477			ret = 0;
4478	}
4479
4480	return ret;
4481}
4482
4483#ifdef CONFIG_COMPAT
4484static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
4485					struct io_async_msghdr *iomsg)
4486{
4487	struct io_sr_msg *sr = &req->sr_msg;
4488	struct compat_iovec __user *uiov;
4489	compat_uptr_t ptr;
4490	compat_size_t len;
4491	int ret;
4492
4493	ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
4494				  &ptr, &len);
4495	if (ret)
4496		return ret;
4497
4498	uiov = compat_ptr(ptr);
4499	if (req->flags & REQ_F_BUFFER_SELECT) {
4500		compat_ssize_t clen;
4501
4502		if (len > 1)
4503			return -EINVAL;
4504		if (!access_ok(uiov, sizeof(*uiov)))
4505			return -EFAULT;
4506		if (__get_user(clen, &uiov->iov_len))
4507			return -EFAULT;
4508		if (clen < 0)
4509			return -EINVAL;
4510		sr->len = clen;
4511		iomsg->free_iov = NULL;
4512	} else {
4513		iomsg->free_iov = iomsg->fast_iov;
4514		ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
4515				   UIO_FASTIOV, &iomsg->free_iov,
4516				   &iomsg->msg.msg_iter, true);
4517		if (ret < 0)
4518			return ret;
4519	}
4520
4521	return 0;
4522}
4523#endif
4524
4525static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4526			       struct io_async_msghdr *iomsg)
4527{
4528	iomsg->msg.msg_name = &iomsg->addr;
4529
4530#ifdef CONFIG_COMPAT
4531	if (req->ctx->compat)
4532		return __io_compat_recvmsg_copy_hdr(req, iomsg);
4533#endif
4534
4535	return __io_recvmsg_copy_hdr(req, iomsg);
4536}
4537
4538static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
4539					       bool needs_lock)
4540{
4541	struct io_sr_msg *sr = &req->sr_msg;
4542	struct io_buffer *kbuf;
4543
4544	kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4545	if (IS_ERR(kbuf))
4546		return kbuf;
4547
4548	sr->kbuf = kbuf;
4549	req->flags |= REQ_F_BUFFER_SELECTED;
4550	return kbuf;
4551}
4552
4553static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4554{
4555	return io_put_kbuf(req, req->sr_msg.kbuf);
4556}
4557
4558static int io_recvmsg_prep_async(struct io_kiocb *req)
4559{
4560	int ret;
4561
4562	ret = io_recvmsg_copy_hdr(req, req->async_data);
4563	if (!ret)
4564		req->flags |= REQ_F_NEED_CLEANUP;
4565	return ret;
4566}
4567
4568static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4569{
4570	struct io_sr_msg *sr = &req->sr_msg;
4571
4572	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4573		return -EINVAL;
4574
4575	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4576	sr->len = READ_ONCE(sqe->len);
4577	sr->bgid = READ_ONCE(sqe->buf_group);
4578	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4579	if (sr->msg_flags & MSG_DONTWAIT)
4580		req->flags |= REQ_F_NOWAIT;
4581
4582#ifdef CONFIG_COMPAT
4583	if (req->ctx->compat)
4584		sr->msg_flags |= MSG_CMSG_COMPAT;
4585#endif
4586	return 0;
4587}
4588
4589static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
4590{
4591	struct io_async_msghdr iomsg, *kmsg;
4592	struct socket *sock;
4593	struct io_buffer *kbuf;
4594	unsigned flags;
4595	int min_ret = 0;
4596	int ret, cflags = 0;
4597	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4598
4599	sock = sock_from_file(req->file);
4600	if (unlikely(!sock))
4601		return -ENOTSOCK;
4602
4603	kmsg = req->async_data;
4604	if (!kmsg) {
4605		ret = io_recvmsg_copy_hdr(req, &iomsg);
4606		if (ret)
4607			return ret;
4608		kmsg = &iomsg;
4609	}
4610
4611	if (req->flags & REQ_F_BUFFER_SELECT) {
4612		kbuf = io_recv_buffer_select(req, !force_nonblock);
4613		if (IS_ERR(kbuf))
4614			return PTR_ERR(kbuf);
4615		kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
4616		kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4617		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
4618				1, req->sr_msg.len);
4619	}
4620
4621	flags = req->sr_msg.msg_flags;
4622	if (force_nonblock)
4623		flags |= MSG_DONTWAIT;
4624	if (flags & MSG_WAITALL)
4625		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4626
4627	ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4628					kmsg->uaddr, flags);
4629	if (force_nonblock && ret == -EAGAIN)
4630		return io_setup_async_msg(req, kmsg);
4631	if (ret == -ERESTARTSYS)
4632		ret = -EINTR;
4633
4634	if (req->flags & REQ_F_BUFFER_SELECTED)
4635		cflags = io_put_recv_kbuf(req);
4636	/* fast path, check for non-NULL to avoid function call */
4637	if (kmsg->free_iov)
4638		kfree(kmsg->free_iov);
4639	req->flags &= ~REQ_F_NEED_CLEANUP;
4640	if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
4641		req_set_fail(req);
4642	__io_req_complete(req, issue_flags, ret, cflags);
4643	return 0;
4644}
4645
4646static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
4647{
4648	struct io_buffer *kbuf;
4649	struct io_sr_msg *sr = &req->sr_msg;
4650	struct msghdr msg;
4651	void __user *buf = sr->buf;
4652	struct socket *sock;
4653	struct iovec iov;
4654	unsigned flags;
4655	int min_ret = 0;
4656	int ret, cflags = 0;
4657	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4658
4659	sock = sock_from_file(req->file);
4660	if (unlikely(!sock))
4661		return -ENOTSOCK;
4662
4663	if (req->flags & REQ_F_BUFFER_SELECT) {
4664		kbuf = io_recv_buffer_select(req, !force_nonblock);
4665		if (IS_ERR(kbuf))
4666			return PTR_ERR(kbuf);
4667		buf = u64_to_user_ptr(kbuf->addr);
4668	}
4669
4670	ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
4671	if (unlikely(ret))
4672		goto out_free;
4673
4674	msg.msg_name = NULL;
4675	msg.msg_control = NULL;
4676	msg.msg_controllen = 0;
4677	msg.msg_namelen = 0;
4678	msg.msg_iocb = NULL;
4679	msg.msg_flags = 0;
4680
4681	flags = req->sr_msg.msg_flags;
4682	if (force_nonblock)
4683		flags |= MSG_DONTWAIT;
4684	if (flags & MSG_WAITALL)
4685		min_ret = iov_iter_count(&msg.msg_iter);
4686
4687	ret = sock_recvmsg(sock, &msg, flags);
4688	if (force_nonblock && ret == -EAGAIN)
4689		return -EAGAIN;
4690	if (ret == -ERESTARTSYS)
4691		ret = -EINTR;
4692out_free:
4693	if (req->flags & REQ_F_BUFFER_SELECTED)
4694		cflags = io_put_recv_kbuf(req);
4695	if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
4696		req_set_fail(req);
4697	__io_req_complete(req, issue_flags, ret, cflags);
4698	return 0;
4699}
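
/*
 * Illustrative userspace sketch, not part of this file: a recv that
 * selects one of the provided buffers from group BGID instead of
 * passing its own, assuming liburing-style helpers.
 *
 *	io_uring_prep_recv(sqe, sockfd, NULL, BUF_LEN, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = BGID;
 *
 * On completion, IORING_CQE_F_BUFFER is set and the chosen buffer id is
 * packed into the upper bits of cqe->flags, which is exactly the cflags
 * value io_put_recv_kbuf() produces above.
 */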
4700
4701static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4702{
4703	struct io_accept *accept = &req->accept;
4704
4705	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4706		return -EINVAL;
4707	if (sqe->ioprio || sqe->len || sqe->buf_index)
4708		return -EINVAL;
4709
4710	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4711	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4712	accept->flags = READ_ONCE(sqe->accept_flags);
4713	accept->nofile = rlimit(RLIMIT_NOFILE);
4714	return 0;
4715}
4716
4717static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
4718{
4719	struct io_accept *accept = &req->accept;
4720	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4721	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
4722	int ret;
4723
4724	if (req->file->f_flags & O_NONBLOCK)
4725		req->flags |= REQ_F_NOWAIT;
4726
4727	ret = __sys_accept4_file(req->file, file_flags, accept->addr,
4728					accept->addr_len, accept->flags,
4729					accept->nofile);
4730	if (ret == -EAGAIN && force_nonblock)
4731		return -EAGAIN;
4732	if (ret < 0) {
4733		if (ret == -ERESTARTSYS)
4734			ret = -EINTR;
4735		req_set_fail(req);
4736	}
4737	__io_req_complete(req, issue_flags, ret, 0);
4738	return 0;
4739}
4740
4741static int io_connect_prep_async(struct io_kiocb *req)
4742{
4743	struct io_async_connect *io = req->async_data;
4744	struct io_connect *conn = &req->connect;
4745
4746	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
4747}
4748
4749static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4750{
4751	struct io_connect *conn = &req->connect;
4752
4753	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4754		return -EINVAL;
4755	if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4756		return -EINVAL;
4757
4758	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4759	conn->addr_len = READ_ONCE(sqe->addr2);
4760	return 0;
4761}
4762
4763static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
4764{
4765	struct io_async_connect __io, *io;
4766	unsigned file_flags;
4767	int ret;
4768	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4769
4770	if (req->async_data) {
4771		io = req->async_data;
4772	} else {
4773		ret = move_addr_to_kernel(req->connect.addr,
4774						req->connect.addr_len,
4775						&__io.address);
4776		if (ret)
4777			goto out;
4778		io = &__io;
4779	}
4780
4781	file_flags = force_nonblock ? O_NONBLOCK : 0;
4782
4783	ret = __sys_connect_file(req->file, &io->address,
4784					req->connect.addr_len, file_flags);
4785	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
4786		if (req->async_data)
4787			return -EAGAIN;
4788		if (io_alloc_async_data(req)) {
4789			ret = -ENOMEM;
4790			goto out;
4791		}
4792		memcpy(req->async_data, &__io, sizeof(__io));
4793		return -EAGAIN;
4794	}
4795	if (ret == -ERESTARTSYS)
4796		ret = -EINTR;
4797out:
4798	if (ret < 0)
4799		req_set_fail(req);
4800	__io_req_complete(req, issue_flags, ret, 0);
4801	return 0;
4802}
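
/*
 * Note on the retry handling above: a nonblocking connect usually
 * returns -EINPROGRESS rather than -EAGAIN, and both are treated as
 * "retry from a blocking context". The sockaddr is copied into
 * async_data first so it stays valid after the original SQE memory may
 * have been reused.
 */
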
4803#else /* !CONFIG_NET */
4804#define IO_NETOP_FN(op)							\
4805static int io_##op(struct io_kiocb *req, unsigned int issue_flags)	\
4806{									\
4807	return -EOPNOTSUPP;						\
4808}
4809
4810#define IO_NETOP_PREP(op)						\
4811IO_NETOP_FN(op)								\
4812static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
4813{									\
4814	return -EOPNOTSUPP;						\
4815}									\
4816
4817#define IO_NETOP_PREP_ASYNC(op)						\
4818IO_NETOP_PREP(op)							\
4819static int io_##op##_prep_async(struct io_kiocb *req)			\
4820{									\
4821	return -EOPNOTSUPP;						\
4822}
4823
4824IO_NETOP_PREP_ASYNC(sendmsg);
4825IO_NETOP_PREP_ASYNC(recvmsg);
4826IO_NETOP_PREP_ASYNC(connect);
4827IO_NETOP_PREP(accept);
4828IO_NETOP_FN(send);
4829IO_NETOP_FN(recv);
4830#endif /* CONFIG_NET */
4831
4832struct io_poll_table {
4833	struct poll_table_struct pt;
4834	struct io_kiocb *req;
4835	int nr_entries;
4836	int error;
4837};
4838
4839static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4840			   __poll_t mask, io_req_tw_func_t func)
4841{
4842	/* for instances that support it, check for an event match first: */
4843	if (mask && !(mask & poll->events))
4844		return 0;
4845
4846	trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4847
4848	list_del_init(&poll->wait.entry);
4849
4850	req->result = mask;
4851	req->io_task_work.func = func;
4852
4853	/*
4854	 * If this fails, then the task is exiting. When a task exits, the
4855	 * work gets canceled, so just cancel this request as well instead
4856	 * of executing it. We can't safely execute it anyway, as we may no
4857	 * longer have the state needed for it.
4858	 */
4859	io_req_task_work_add(req);
4860	return 1;
4861}
4862
4863static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4864	__acquires(&req->ctx->completion_lock)
4865{
4866	struct io_ring_ctx *ctx = req->ctx;
4867
4868	if (unlikely(req->task->flags & PF_EXITING))
4869		WRITE_ONCE(poll->canceled, true);
4870
4871	if (!req->result && !READ_ONCE(poll->canceled)) {
4872		struct poll_table_struct pt = { ._key = poll->events };
4873
4874		req->result = vfs_poll(req->file, &pt) & poll->events;
4875	}
4876
4877	spin_lock_irq(&ctx->completion_lock);
4878	if (!req->result && !READ_ONCE(poll->canceled)) {
4879		add_wait_queue(poll->head, &poll->wait);
4880		return true;
4881	}
4882
4883	return false;
4884}
4885
4886static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
4887{
4888	/* pure poll stashes this in ->async_data, poll-driven retry keeps it in ->apoll */
4889	if (req->opcode == IORING_OP_POLL_ADD)
4890		return req->async_data;
4891	return req->apoll->double_poll;
4892}
4893
4894static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4895{
4896	if (req->opcode == IORING_OP_POLL_ADD)
4897		return &req->poll;
4898	return &req->apoll->poll;
4899}
4900
4901static void io_poll_remove_double(struct io_kiocb *req)
4902	__must_hold(&req->ctx->completion_lock)
4903{
4904	struct io_poll_iocb *poll = io_poll_get_double(req);
4905
4906	lockdep_assert_held(&req->ctx->completion_lock);
4907
4908	if (poll && poll->head) {
4909		struct wait_queue_head *head = poll->head;
4910
4911		spin_lock(&head->lock);
4912		list_del_init(&poll->wait.entry);
4913		if (poll->wait.private)
4914			req_ref_put(req);
4915		poll->head = NULL;
4916		spin_unlock(&head->lock);
4917	}
4918}
4919
4920static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
4921	__must_hold(&req->ctx->completion_lock)
4922{
4923	struct io_ring_ctx *ctx = req->ctx;
4924	unsigned flags = IORING_CQE_F_MORE;
4925	int error;
4926
4927	if (READ_ONCE(req->poll.canceled)) {
4928		error = -ECANCELED;
4929		req->poll.events |= EPOLLONESHOT;
4930	} else {
4931		error = mangle_poll(mask);
4932	}
4933	if (req->poll.events & EPOLLONESHOT)
4934		flags = 0;
4935	if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
4936		req->poll.done = true;
4937		flags = 0;
4938	}
4939	if (flags & IORING_CQE_F_MORE)
4940		ctx->cq_extra++;
4941
4942	io_commit_cqring(ctx);
4943	return !(flags & IORING_CQE_F_MORE);
4944}
4945
4946static void io_poll_task_func(struct io_kiocb *req)
4947{
4948	struct io_ring_ctx *ctx = req->ctx;
4949	struct io_kiocb *nxt;
4950
4951	if (io_poll_rewait(req, &req->poll)) {
4952		spin_unlock_irq(&ctx->completion_lock);
4953	} else {
4954		bool done;
4955
4956		done = io_poll_complete(req, req->result);
4957		if (done) {
4958			io_poll_remove_double(req);
4959			hash_del(&req->hash_node);
4960		} else {
4961			req->result = 0;
4962			add_wait_queue(req->poll.head, &req->poll.wait);
4963		}
4964		spin_unlock_irq(&ctx->completion_lock);
4965		io_cqring_ev_posted(ctx);
4966
4967		if (done) {
4968			nxt = io_put_req_find_next(req);
4969			if (nxt)
4970				io_req_task_submit(nxt);
4971		}
4972	}
4973}
4974
4975static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4976			       int sync, void *key)
4977{
4978	struct io_kiocb *req = wait->private;
4979	struct io_poll_iocb *poll = io_poll_get_single(req);
4980	__poll_t mask = key_to_poll(key);
4981
4982	/* for instances that support it, check for an event match first: */
4983	if (mask && !(mask & poll->events))
4984		return 0;
4985	if (!(poll->events & EPOLLONESHOT))
4986		return poll->wait.func(&poll->wait, mode, sync, key);
4987
4988	list_del_init(&wait->entry);
4989
4990	if (poll->head) {
4991		bool done;
4992
4993		spin_lock(&poll->head->lock);
4994		done = list_empty(&poll->wait.entry);
4995		if (!done)
4996			list_del_init(&poll->wait.entry);
4997		/* make sure double remove sees this as being gone */
4998		wait->private = NULL;
4999		spin_unlock(&poll->head->lock);
5000		if (!done) {
5001			/* use the wait func handler, so it matches the request type */
5002			poll->wait.func(&poll->wait, mode, sync, key);
5003		}
5004	}
5005	req_ref_put(req);
5006	return 1;
5007}
5008
5009static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5010			      wait_queue_func_t wake_func)
5011{
5012	poll->head = NULL;
5013	poll->done = false;
5014	poll->canceled = false;
5015#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
5016	/* mask in events that we always want/need */
5017	poll->events = events | IO_POLL_UNMASK;
5018	INIT_LIST_HEAD(&poll->wait.entry);
5019	init_waitqueue_func_entry(&poll->wait, wake_func);
5020}
5021
5022static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
5023			    struct wait_queue_head *head,
5024			    struct io_poll_iocb **poll_ptr)
5025{
5026	struct io_kiocb *req = pt->req;
5027
5028	/*
5029	 * The file being polled uses multiple waitqueues for poll handling
5030	 * (e.g. one for read, one for write). Set up a separate io_poll_iocb
5031	 * if this happens.
5032	 */
5033	if (unlikely(pt->nr_entries)) {
5034		struct io_poll_iocb *poll_one = poll;
5035
5036		/* already have a 2nd entry, fail a third attempt */
5037		if (*poll_ptr) {
5038			pt->error = -EINVAL;
5039			return;
5040		}
5041		/*
5042		 * Can't handle multishot for double wait for now, turn it
5043		 * into one-shot mode.
5044		 */
5045		if (!(poll_one->events & EPOLLONESHOT))
5046			poll_one->events |= EPOLLONESHOT;
5047		/* double add on the same waitqueue head, ignore */
5048		if (poll_one->head == head)
5049			return;
5050		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5051		if (!poll) {
5052			pt->error = -ENOMEM;
5053			return;
5054		}
5055		io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
5056		req_ref_get(req);
5057		poll->wait.private = req;
5058		*poll_ptr = poll;
5059	}
5060
5061	pt->nr_entries++;
5062	poll->head = head;
5063
5064	if (poll->events & EPOLLEXCLUSIVE)
5065		add_wait_queue_exclusive(head, &poll->wait);
5066	else
5067		add_wait_queue(head, &poll->wait);
5068}
5069
5070static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5071			       struct poll_table_struct *p)
5072{
5073	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5074	struct async_poll *apoll = pt->req->apoll;
5075
5076	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
5077}
5078
5079static void io_async_task_func(struct io_kiocb *req)
5080{
5081	struct async_poll *apoll = req->apoll;
5082	struct io_ring_ctx *ctx = req->ctx;
5083
5084	trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
5085
5086	if (io_poll_rewait(req, &apoll->poll)) {
5087		spin_unlock_irq(&ctx->completion_lock);
5088		return;
5089	}
5090
5091	hash_del(&req->hash_node);
5092	io_poll_remove_double(req);
5093	spin_unlock_irq(&ctx->completion_lock);
5094
5095	if (!READ_ONCE(apoll->poll.canceled))
5096		io_req_task_submit(req);
5097	else
5098		io_req_complete_failed(req, -ECANCELED);
5099}
5100
5101static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5102			void *key)
5103{
5104	struct io_kiocb *req = wait->private;
5105	struct io_poll_iocb *poll = &req->apoll->poll;
5106
5107	trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5108					key_to_poll(key));
5109
5110	return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5111}
5112
5113static void io_poll_req_insert(struct io_kiocb *req)
5114{
5115	struct io_ring_ctx *ctx = req->ctx;
5116	struct hlist_head *list;
5117
5118	list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5119	hlist_add_head(&req->hash_node, list);
5120}
5121
5122static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5123				      struct io_poll_iocb *poll,
5124				      struct io_poll_table *ipt, __poll_t mask,
5125				      wait_queue_func_t wake_func)
5126	__acquires(&ctx->completion_lock)
5127{
5128	struct io_ring_ctx *ctx = req->ctx;
5129	bool cancel = false;
5130
5131	INIT_HLIST_NODE(&req->hash_node);
5132	io_init_poll_iocb(poll, mask, wake_func);
5133	poll->file = req->file;
5134	poll->wait.private = req;
5135
5136	ipt->pt._key = mask;
5137	ipt->req = req;
5138	ipt->error = 0;
5139	ipt->nr_entries = 0;
5140
5141	mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5142	if (unlikely(!ipt->nr_entries) && !ipt->error)
5143		ipt->error = -EINVAL;
5144
5145	spin_lock_irq(&ctx->completion_lock);
5146	if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
5147		io_poll_remove_double(req);
5148	if (likely(poll->head)) {
5149		spin_lock(&poll->head->lock);
5150		if (unlikely(list_empty(&poll->wait.entry))) {
5151			if (ipt->error)
5152				cancel = true;
5153			ipt->error = 0;
5154			mask = 0;
5155		}
5156		if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error)
5157			list_del_init(&poll->wait.entry);
5158		else if (cancel)
5159			WRITE_ONCE(poll->canceled, true);
5160		else if (!poll->done) /* actually waiting for an event */
5161			io_poll_req_insert(req);
5162		spin_unlock(&poll->head->lock);
5163	}
5164
5165	return mask;
5166}
5167
5168enum {
5169	IO_APOLL_OK,
5170	IO_APOLL_ABORTED,
5171	IO_APOLL_READY
5172};
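
/*
 * Outcomes of io_arm_poll_handler() below: IO_APOLL_OK means the poll
 * handler was armed and the request will be retried on readiness;
 * IO_APOLL_READY means the file is ready right now, so the caller
 * should just reissue; IO_APOLL_ABORTED means polling isn't possible
 * and the request has to be punted to io-wq instead.
 */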
5173
5174static int io_arm_poll_handler(struct io_kiocb *req)
5175{
5176	const struct io_op_def *def = &io_op_defs[req->opcode];
5177	struct io_ring_ctx *ctx = req->ctx;
5178	struct async_poll *apoll;
5179	struct io_poll_table ipt;
5180	__poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI;
5181	int rw;
5182
5183	if (!req->file || !file_can_poll(req->file))
5184		return IO_APOLL_ABORTED;
5185	if (req->flags & REQ_F_POLLED)
5186		return IO_APOLL_ABORTED;
5187	if (!def->pollin && !def->pollout)
5188		return IO_APOLL_ABORTED;
5189
5190	if (def->pollin) {
5191		rw = READ;
5192		mask |= POLLIN | POLLRDNORM;
5193
5194		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5195		if ((req->opcode == IORING_OP_RECVMSG) &&
5196		    (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5197			mask &= ~POLLIN;
5198	} else {
5199		rw = WRITE;
5200		mask |= POLLOUT | POLLWRNORM;
5201	}
5202
5203	/* if we can't do a nonblocking try, no point in arming a poll handler */
5204	if (!io_file_supports_nowait(req, rw))
5205		return IO_APOLL_ABORTED;
5206
5207	apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5208	if (unlikely(!apoll))
5209		return IO_APOLL_ABORTED;
5210	apoll->double_poll = NULL;
5211	req->apoll = apoll;
5212	req->flags |= REQ_F_POLLED;
5213	ipt.pt._qproc = io_async_queue_proc;
5214
5215	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5216					io_async_wake);
5217	if (ret || ipt.error) {
5218		spin_unlock_irq(&ctx->completion_lock);
5219		if (ret)
5220			return IO_APOLL_READY;
5221		return IO_APOLL_ABORTED;
5222	}
5223	spin_unlock_irq(&ctx->completion_lock);
5224	trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
5225				mask, apoll->poll.events);
5226	return IO_APOLL_OK;
5227}
5228
5229static bool __io_poll_remove_one(struct io_kiocb *req,
5230				 struct io_poll_iocb *poll, bool do_cancel)
5231	__must_hold(&req->ctx->completion_lock)
5232{
5233	bool do_complete = false;
5234
5235	if (!poll->head)
5236		return false;
5237	spin_lock(&poll->head->lock);
5238	if (do_cancel)
5239		WRITE_ONCE(poll->canceled, true);
5240	if (!list_empty(&poll->wait.entry)) {
5241		list_del_init(&poll->wait.entry);
5242		do_complete = true;
5243	}
5244	spin_unlock(&poll->head->lock);
5245	hash_del(&req->hash_node);
5246	return do_complete;
5247}
5248
5249static bool io_poll_remove_one(struct io_kiocb *req)
5250	__must_hold(&req->ctx->completion_lock)
5251{
5252	int refs;
5253	bool do_complete;
5254
5255	io_poll_remove_double(req);
5256	do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
5257
5258	if (do_complete) {
5259		io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
5260		io_commit_cqring(req->ctx);
5261		req_set_fail(req);
5262
5263		/* non-poll requests still have the submit ref */
5264		refs = 1 + (req->opcode != IORING_OP_POLL_ADD);
5265		io_put_req_deferred(req, refs);
5266	}
5267	return do_complete;
5268}
5269
5270/*
5271 * Returns true if we found and killed one or more poll requests
5272 */
5273static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
5274			       bool cancel_all)
5275{
5276	struct hlist_node *tmp;
5277	struct io_kiocb *req;
5278	int posted = 0, i;
5279
5280	spin_lock_irq(&ctx->completion_lock);
5281	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5282		struct hlist_head *list;
5283
5284		list = &ctx->cancel_hash[i];
5285		hlist_for_each_entry_safe(req, tmp, list, hash_node) {
5286			if (io_match_task(req, tsk, cancel_all))
5287				posted += io_poll_remove_one(req);
5288		}
5289	}
5290	spin_unlock_irq(&ctx->completion_lock);
5291
5292	if (posted)
5293		io_cqring_ev_posted(ctx);
5294
5295	return posted != 0;
5296}
5297
5298static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
5299				     bool poll_only)
5300	__must_hold(&ctx->completion_lock)
5301{
5302	struct hlist_head *list;
5303	struct io_kiocb *req;
5304
5305	list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5306	hlist_for_each_entry(req, list, hash_node) {
5307		if (sqe_addr != req->user_data)
5308			continue;
5309		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
5310			continue;
5311		return req;
5312	}
5313	return NULL;
5314}
5315
5316static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
5317			  bool poll_only)
5318	__must_hold(&ctx->completion_lock)
5319{
5320	struct io_kiocb *req;
5321
5322	req = io_poll_find(ctx, sqe_addr, poll_only);
5323	if (!req)
5324		return -ENOENT;
5325	if (io_poll_remove_one(req))
5326		return 0;
5327
5328	return -EALREADY;
5329}
5330
5331static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
5332				     unsigned int flags)
5333{
5334	u32 events;
5335
5336	events = READ_ONCE(sqe->poll32_events);
5337#ifdef __BIG_ENDIAN
5338	events = swahw32(events);
5339#endif
5340	if (!(flags & IORING_POLL_ADD_MULTI))
5341		events |= EPOLLONESHOT;
5342	return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
5343}
5344
5345static int io_poll_update_prep(struct io_kiocb *req,
5346			       const struct io_uring_sqe *sqe)
5347{
5348	struct io_poll_update *upd = &req->poll_update;
5349	u32 flags;
5350
5351	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5352		return -EINVAL;
5353	if (sqe->ioprio || sqe->buf_index)
5354		return -EINVAL;
5355	flags = READ_ONCE(sqe->len);
5356	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
5357		      IORING_POLL_ADD_MULTI))
5358		return -EINVAL;
5359	/* meaningless without update */
5360	if (flags == IORING_POLL_ADD_MULTI)
5361		return -EINVAL;
5362
5363	upd->old_user_data = READ_ONCE(sqe->addr);
5364	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
5365	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
5366
5367	upd->new_user_data = READ_ONCE(sqe->off);
5368	if (!upd->update_user_data && upd->new_user_data)
5369		return -EINVAL;
5370	if (upd->update_events)
5371		upd->events = io_poll_parse_events(sqe, flags);
5372	else if (sqe->poll32_events)
5373		return -EINVAL;
5374
5375	return 0;
5376}
5377
5378static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5379			void *key)
5380{
5381	struct io_kiocb *req = wait->private;
5382	struct io_poll_iocb *poll = &req->poll;
5383
5384	return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
5385}
5386
5387static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5388			       struct poll_table_struct *p)
5389{
5390	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5391
5392	__io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
5393}
5394
5395static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5396{
5397	struct io_poll_iocb *poll = &req->poll;
5398	u32 flags;
5399
5400	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5401		return -EINVAL;
5402	if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
5403		return -EINVAL;
5404	flags = READ_ONCE(sqe->len);
5405	if (flags & ~IORING_POLL_ADD_MULTI)
5406		return -EINVAL;
5407
5408	poll->events = io_poll_parse_events(sqe, flags);
5409	return 0;
5410}
5411
5412static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
5413{
5414	struct io_poll_iocb *poll = &req->poll;
5415	struct io_ring_ctx *ctx = req->ctx;
5416	struct io_poll_table ipt;
5417	__poll_t mask;
5418
5419	ipt.pt._qproc = io_poll_queue_proc;
5420
5421	mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5422					io_poll_wake);
5423
5424	if (mask) { /* no async, we'd stolen it */
5425		ipt.error = 0;
5426		io_poll_complete(req, mask);
5427	}
5428	spin_unlock_irq(&ctx->completion_lock);
5429
5430	if (mask) {
5431		io_cqring_ev_posted(ctx);
5432		if (poll->events & EPOLLONESHOT)
5433			io_put_req(req);
5434	}
5435	return ipt.error;
5436}
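
/*
 * Illustrative userspace sketch, not part of this file: arming a
 * multishot poll, assuming a liburing-style prep helper and direct sqe
 * access.
 *
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	sqe->len = IORING_POLL_ADD_MULTI;
 *
 * Without IORING_POLL_ADD_MULTI, io_poll_parse_events() ORs in
 * EPOLLONESHOT and the request completes on the first event. In
 * multishot mode every event posts a CQE with IORING_CQE_F_MORE set,
 * until an error or a failure to post the CQE terminates the series.
 */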
5437
5438static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
5439{
5440	struct io_ring_ctx *ctx = req->ctx;
5441	struct io_kiocb *preq;
5442	bool completing;
5443	int ret;
5444
5445	spin_lock_irq(&ctx->completion_lock);
5446	preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
5447	if (!preq) {
5448		ret = -ENOENT;
5449		goto err;
5450	}
5451
5452	if (!req->poll_update.update_events && !req->poll_update.update_user_data) {
5453		completing = true;
5454		ret = io_poll_remove_one(preq) ? 0 : -EALREADY;
5455		goto err;
5456	}
5457
5458	/*
5459	 * Don't allow racy completion with singleshot, as we cannot safely
5460	 * update those. For multishot, if we're racing with completion, just
5461	 * let completion re-add it.
5462	 */
5463	completing = !__io_poll_remove_one(preq, &preq->poll, false);
5464	if (completing && (preq->poll.events & EPOLLONESHOT)) {
5465		ret = -EALREADY;
5466		goto err;
5467	}
5468	/* we now have a detached poll request. Reissue it. */
5469	ret = 0;
5470err:
5471	if (ret < 0) {
5472		spin_unlock_irq(&ctx->completion_lock);
5473		req_set_fail(req);
5474		io_req_complete(req, ret);
5475		return 0;
5476	}
5477	/* only update the event mask, keep the behavior flags */
5478	if (req->poll_update.update_events) {
5479		preq->poll.events &= ~0xffff;
5480		preq->poll.events |= req->poll_update.events & 0xffff;
5481		preq->poll.events |= IO_POLL_UNMASK;
5482	}
5483	if (req->poll_update.update_user_data)
5484		preq->user_data = req->poll_update.new_user_data;
5485	spin_unlock_irq(&ctx->completion_lock);
5486
5487	/* complete update request, we're done with it */
5488	io_req_complete(req, ret);
5489
5490	if (!completing) {
5491		ret = io_poll_add(preq, issue_flags);
5492		if (ret < 0) {
5493			req_set_fail(preq);
5494			io_req_complete(preq, ret);
5495		}
5496	}
5497	return 0;
5498}
5499
5500static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5501{
5502	struct io_timeout_data *data = container_of(timer,
5503						struct io_timeout_data, timer);
5504	struct io_kiocb *req = data->req;
5505	struct io_ring_ctx *ctx = req->ctx;
5506	unsigned long flags;
5507
5508	spin_lock_irqsave(&ctx->completion_lock, flags);
5509	list_del_init(&req->timeout.list);
5510	atomic_set(&req->ctx->cq_timeouts,
5511		atomic_read(&req->ctx->cq_timeouts) + 1);
5512
5513	io_cqring_fill_event(ctx, req->user_data, -ETIME, 0);
5514	io_commit_cqring(ctx);
5515	spin_unlock_irqrestore(&ctx->completion_lock, flags);
5516
5517	io_cqring_ev_posted(ctx);
5518	req_set_fail(req);
5519	io_put_req(req);
5520	return HRTIMER_NORESTART;
5521}
5522
5523static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5524					   __u64 user_data)
5525	__must_hold(&ctx->completion_lock)
5526{
5527	struct io_timeout_data *io;
5528	struct io_kiocb *req;
5529	bool found = false;
5530
5531	list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
5532		found = user_data == req->user_data;
5533		if (found)
5534			break;
5535	}
5536	if (!found)
5537		return ERR_PTR(-ENOENT);
5538
5539	io = req->async_data;
5540	if (hrtimer_try_to_cancel(&io->timer) == -1)
5541		return ERR_PTR(-EALREADY);
5542	list_del_init(&req->timeout.list);
5543	return req;
5544}
5545
5546static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5547	__must_hold(&ctx->completion_lock)
5548{
5549	struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5550
5551	if (IS_ERR(req))
5552		return PTR_ERR(req);
5553
5554	req_set_fail(req);
5555	io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
5556	io_put_req_deferred(req, 1);
5557	return 0;
5558}
5559
5560static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5561			     struct timespec64 *ts, enum hrtimer_mode mode)
5562	__must_hold(&ctx->completion_lock)
5563{
5564	struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5565	struct io_timeout_data *data;
5566
5567	if (IS_ERR(req))
5568		return PTR_ERR(req);
5569
5570	req->timeout.off = 0; /* noseq */
5571	data = req->async_data;
5572	list_add_tail(&req->timeout.list, &ctx->timeout_list);
5573	hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5574	data->timer.function = io_timeout_fn;
5575	hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5576	return 0;
5577}
5578
5579static int io_timeout_remove_prep(struct io_kiocb *req,
5580				  const struct io_uring_sqe *sqe)
5581{
5582	struct io_timeout_rem *tr = &req->timeout_rem;
5583
5584	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5585		return -EINVAL;
5586	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5587		return -EINVAL;
5588	if (sqe->ioprio || sqe->buf_index || sqe->len)
5589		return -EINVAL;
5590
5591	tr->addr = READ_ONCE(sqe->addr);
5592	tr->flags = READ_ONCE(sqe->timeout_flags);
5593	if (tr->flags & IORING_TIMEOUT_UPDATE) {
5594		if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5595			return -EINVAL;
5596		if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5597			return -EFAULT;
5598	} else if (tr->flags) {
5599		/* timeout removal doesn't support flags */
5600		return -EINVAL;
5601	}
5602
5603	return 0;
5604}
5605
5606static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5607{
5608	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5609					    : HRTIMER_MODE_REL;
5610}
5611
5612/*
5613 * Remove or update an existing timeout command
5614 */
5615static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
5616{
5617	struct io_timeout_rem *tr = &req->timeout_rem;
5618	struct io_ring_ctx *ctx = req->ctx;
5619	int ret;
5620
5621	spin_lock_irq(&ctx->completion_lock);
5622	if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
5623		ret = io_timeout_cancel(ctx, tr->addr);
5624	else
5625		ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5626					io_translate_timeout_mode(tr->flags));
5627
5628	io_cqring_fill_event(ctx, req->user_data, ret, 0);
5629	io_commit_cqring(ctx);
5630	spin_unlock_irq(&ctx->completion_lock);
5631	io_cqring_ev_posted(ctx);
5632	if (ret < 0)
5633		req_set_fail(req);
5634	io_put_req(req);
5635	return 0;
5636}
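
/*
 * Illustrative userspace sketch, not part of this file: updating a
 * pending timeout in place instead of cancel plus re-add, assuming a
 * liburing-style helper.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 5 };
 *
 *	io_uring_prep_timeout_update(sqe, &ts, TARGET_USER_DATA, 0);
 *
 * This ends up in io_timeout_update() above: the old timer is cancelled
 * if still pending and re-armed with the new expiry, and the timeout is
 * switched to "noseq" so it no longer waits on a completion count.
 */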
5637
5638static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5639			   bool is_timeout_link)
5640{
5641	struct io_timeout_data *data;
5642	unsigned flags;
5643	u32 off = READ_ONCE(sqe->off);
5644
5645	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5646		return -EINVAL;
5647	if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
5648		return -EINVAL;
5649	if (off && is_timeout_link)
5650		return -EINVAL;
5651	flags = READ_ONCE(sqe->timeout_flags);
5652	if (flags & ~IORING_TIMEOUT_ABS)
5653		return -EINVAL;
5654
5655	req->timeout.off = off;
5656	if (unlikely(off && !req->ctx->off_timeout_used))
5657		req->ctx->off_timeout_used = true;
5658
5659	if (!req->async_data && io_alloc_async_data(req))
5660		return -ENOMEM;
5661
5662	data = req->async_data;
5663	data->req = req;
5664
5665	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5666		return -EFAULT;
5667
5668	data->mode = io_translate_timeout_mode(flags);
5669	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
5670	if (is_timeout_link)
5671		io_req_track_inflight(req);
5672	return 0;
5673}
5674
5675static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
5676{
5677	struct io_ring_ctx *ctx = req->ctx;
5678	struct io_timeout_data *data = req->async_data;
5679	struct list_head *entry;
5680	u32 tail, off = req->timeout.off;
5681
5682	spin_lock_irq(&ctx->completion_lock);
5683
5684	/*
5685	 * sqe->off holds how many events that need to occur for this
5686	 * timeout event to be satisfied. If it isn't set, then this is
5687	 * a pure timeout request, sequence isn't used.
5688	 */
5689	if (io_is_timeout_noseq(req)) {
5690		entry = ctx->timeout_list.prev;
5691		goto add;
5692	}
5693
5694	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5695	req->timeout.target_seq = tail + off;
5696
5697	/* Update the last seq here in case io_flush_timeouts() hasn't.
5698	 * This is safe because ->completion_lock is held, and submissions
5699	 * and completions are never mixed in the same ->completion_lock section.
5700	 */
5701	ctx->cq_last_tm_flush = tail;
5702
5703	/*
5704	 * Insertion sort, ensuring the first entry in the list is always
5705	 * the next one to fire.
5706	 */
5707	list_for_each_prev(entry, &ctx->timeout_list) {
5708		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5709						  timeout.list);
5710
5711		if (io_is_timeout_noseq(nxt))
5712			continue;
5713		/* nxt.seq is behind @tail, otherwise would've been completed */
5714		if (off >= nxt->timeout.target_seq - tail)
5715			break;
5716	}
5717add:
5718	list_add(&req->timeout.list, entry);
5719	data->timer.function = io_timeout_fn;
5720	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5721	spin_unlock_irq(&ctx->completion_lock);
5722	return 0;
5723}
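
/*
 * Illustrative userspace sketch, not part of this file: a timeout that
 * fires after either two completions or one second, whichever happens
 * first, assuming a liburing-style helper.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *
 *	io_uring_prep_timeout(sqe, &ts, 2, 0);
 *
 * The count of 2 lands in sqe->off and hence req->timeout.off above; a
 * count of 0 yields a pure timer that skips the sequence sorting.
 */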
5724
5725struct io_cancel_data {
5726	struct io_ring_ctx *ctx;
5727	u64 user_data;
5728};
5729
5730static bool io_cancel_cb(struct io_wq_work *work, void *data)
5731{
5732	struct