Searched refs: io_kiocb (Results 1 - 25 of 55), sorted by relevance

/linux-master/io_uring/
xattr.h
3 void io_xattr_cleanup(struct io_kiocb *req);
5 int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
6 int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags);
8 int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
9 int io_setxattr(struct io_kiocb *req, unsigned int issue_flags);
11 int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
12 int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags);
14 int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
15 int io_getxattr(struct io_kiocb *req, unsigned int issue_flags);
fs.h
3 int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4 int io_renameat(struct io_kiocb *req, unsigned int issue_flags);
5 void io_renameat_cleanup(struct io_kiocb *req);
7 int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
8 int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags);
9 void io_unlinkat_cleanup(struct io_kiocb *req);
11 int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
12 int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags);
13 void io_mkdirat_cleanup(struct io_kiocb *req);
15 int io_symlinkat_prep(struct io_kiocb *re
[all...]
sync.h
3 int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4 int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags);
6 int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
7 int io_fsync(struct io_kiocb *req, unsigned int issue_flags);
9 int io_fallocate(struct io_kiocb *req, unsigned int issue_flags);
10 int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
epoll.h
4 int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
5 int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags);
nop.h
3 int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4 int io_nop(struct io_kiocb *req, unsigned int issue_flags);
truncate.h
3 int io_ftruncate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4 int io_ftruncate(struct io_kiocb *req, unsigned int issue_flags);
advise.h
3 int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4 int io_madvise(struct io_kiocb *req, unsigned int issue_flags);
6 int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
7 int io_fadvise(struct io_kiocb *req, unsigned int issue_flags);
splice.h
3 int io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4 int io_tee(struct io_kiocb *req, unsigned int issue_flags);
6 int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
7 int io_splice(struct io_kiocb *req, unsigned int issue_flags);
statx.h
3 int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4 int io_statx(struct io_kiocb *req, unsigned int issue_flags);
5 void io_statx_cleanup(struct io_kiocb *req);
msg_ring.h
3 int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4 int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags);
5 void io_msg_ring_cleanup(struct io_kiocb *req);
uring_cmd.h
3 int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
4 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
5 int io_uring_cmd_prep_async(struct io_kiocb *req);
timeout.h
4 struct io_kiocb *req;
11 struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
12 struct io_kiocb *link);
14 static inline struct io_kiocb *io_disarm_linked_timeout(struct io_kiocb *req)
16 struct io_kiocb *link = req->link;
29 void io_queue_linked_timeout(struct io_kiocb *req);
30 void io_disarm_next(struct io_kiocb *req);
32 int io_timeout_prep(struct io_kiocb *re
[all...]
rw.h
18 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe);
19 int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe);
20 int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
21 int io_read(struct io_kiocb *req, unsigned int issue_flags);
22 int io_readv_prep_async(struct io_kiocb *req);
23 int io_write(struct io_kiocb *req, unsigned int issue_flags);
24 int io_writev_prep_async(struct io_kiocb *req);
25 void io_readv_writev_cleanup(struct io_kiocb *req);
26 void io_rw_fail(struct io_kiocb *req);
27 void io_req_rw_complete(struct io_kiocb *re
[all...]
net.h
34 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
35 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
37 int io_sendmsg_prep_async(struct io_kiocb *req);
38 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
39 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
40 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
42 int io_send(struct io_kiocb *req, unsigned int issue_flags);
43 int io_send_prep_async(struct io_kiocb *req);
45 int io_recvmsg_prep_async(struct io_kiocb *req);
46 int io_recvmsg_prep(struct io_kiocb *re
[all...]
openclose.h
6 int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
7 int io_openat(struct io_kiocb *req, unsigned int issue_flags);
8 void io_open_cleanup(struct io_kiocb *req);
10 int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
11 int io_openat2(struct io_kiocb *req, unsigned int issue_flags);
13 int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
14 int io_close(struct io_kiocb *req, unsigned int issue_flags);
16 int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
17 int io_install_fixed_fd(struct io_kiocb *req, unsigned int issue_flags);
opdef.h
35 int (*issue)(struct io_kiocb *, unsigned int);
36 int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
45 int (*prep_async)(struct io_kiocb *);
46 void (*cleanup)(struct io_kiocb *);
47 void (*fail)(struct io_kiocb *);
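
The opdef.h hit above is the glue for everything else on this page: each opcode registers its prep/issue handlers (and cold-path cleanup/fail callbacks) through these function pointers. As an abridged illustration only (field placement simplified; the real table is io_issue_defs[] in io_uring/opdef.c, and recent kernels keep cleanup/fail in a separate companion table), entries look roughly like this:

/* Abridged, illustrative entries only, not the complete upstream table. */
const struct io_issue_def io_issue_defs[] = {
	[IORING_OP_NOP] = {
		.prep	= io_nop_prep,		/* nop.h above */
		.issue	= io_nop,
	},
	[IORING_OP_FSYNC] = {
		.needs_file	= 1,
		.prep		= io_fsync_prep,	/* sync.h above */
		.issue		= io_fsync,
	},
};
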
waitid.h
6 struct io_kiocb *req;
10 int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
11 int io_waitid(struct io_kiocb *req, unsigned int issue_flags);
poll.h
31 static inline void io_poll_multishot_retry(struct io_kiocb *req)
36 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
37 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
39 int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
40 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags);
45 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
51 void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts);
nop.c
13 int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
21 int io_nop(struct io_kiocb *req, unsigned int issue_flags)
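
nop.c is the only .c file on this page of results and shows the smallest possible instance of the prep/issue pattern used by every header above. A simplified sketch of such a pair (not the exact upstream body; newer kernels also decode nop-specific flags in prep):

/* Simplified sketch of a minimal opcode implementation. */
int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return 0;				/* nothing to decode from the SQE */
}

int io_nop(struct io_kiocb *req, unsigned int issue_flags)
{
	io_req_set_res(req, 0, 0);		/* CQE: res = 0, flags = 0 */
	return IOU_OK;				/* core posts the completion */
}
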
refs.h
14 static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
20 static inline bool req_ref_put_and_test(struct io_kiocb *req)
29 static inline void req_ref_get(struct io_kiocb *req)
36 static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
44 static inline void io_req_set_refcount(struct io_kiocb *req)
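
refs.h implements the optional per-request reference count, used only when REQ_F_REFCOUNT is set on the io_kiocb. A hedged sketch of the first two helpers (the upstream versions add WARN_ON_ONCE() overflow and flag sanity checks):

/* Hedged sketch of the refcount helpers, not the exact upstream bodies. */
static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
{
	return atomic_inc_not_zero(&req->refs);	/* fails once the count hit zero */
}

static inline bool req_ref_put_and_test(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_REFCOUNT))
		return true;			/* not refcounted: single owner */
	return atomic_dec_and_test(&req->refs);	/* true when the last ref drops */
}
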
futex.h
5 int io_futex_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
6 int io_futexv_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
7 int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags);
8 int io_futexv_wait(struct io_kiocb *req, unsigned int issue_flags);
9 int io_futex_wake(struct io_kiocb *req, unsigned int issue_flags);
cancel.h
18 int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
19 int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);
26 bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
28 static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
kbuf.h
44 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
48 int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
49 int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);
51 int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
52 int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
60 void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
62 bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
68 static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
85 static inline bool io_do_buffer_select(struct io_kiocb *req)
92 static inline bool io_kbuf_recycle(struct io_kiocb *re
[all...]
notif.h
22 struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx);
23 void io_notif_set_extended(struct io_kiocb *notif);
25 static inline struct io_notif_data *io_notif_to_data(struct io_kiocb *notif)
30 static inline void io_notif_flush(struct io_kiocb *notif)
40 static inline int io_notif_account_mem(struct io_kiocb *notif, unsigned len)
io_uring.h
65 void io_req_cqe_overflow(struct io_kiocb *req);
67 void io_req_defer_failed(struct io_kiocb *req, s32 res);
68 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
70 bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
75 struct file *io_file_get_normal(struct io_kiocb *req, int fd);
76 struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
79 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
80 bool io_alloc_async_data(struct io_kiocb *req);
81 void io_req_task_queue(struct io_kiocb *req);
82 void io_queue_iowq(struct io_kiocb *re
[all...]
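
io_uring.h holds the core helpers the handlers above rely on for file lookup, task-work queueing and completion posting. A rough sketch of how the dispatcher uses them together with the opdef table (simplified; the real io_issue_sqe() in io_uring/io_uring.c also handles credentials, deferred completion batching and polled I/O):

/* Rough sketch only: look up the opcode's handlers and post the CQE. */
static int issue_sketch(struct io_kiocb *req, unsigned int issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	int ret;

	ret = def->issue(req, issue_flags);		/* e.g. io_fsync(), io_read() */
	if (ret == IOU_OK)
		io_req_complete_post(req, issue_flags);	/* listed at io_uring.h:68 above */
	return ret;
}
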

Completed in 212 milliseconds
