Lines Matching defs:flags

26  * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
192 if (req->flags & REQ_F_INFLIGHT)
212 if (head->flags & REQ_F_LINK_TIMEOUT) {
301 ctx->flags = p->flags;
371 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
382 if (req->flags & REQ_F_BUFFER_SELECTED) {
388 if (req->flags & REQ_F_NEED_CLEANUP) {
394 if ((req->flags & REQ_F_POLLED) && req->apoll) {
399 if (req->flags & REQ_F_INFLIGHT) {
404 if (req->flags & REQ_F_CREDS)
406 if (req->flags & REQ_F_ASYNC_DATA) {
410 req->flags &= ~IO_REQ_CLEAN_FLAGS;
415 if (!(req->flags & REQ_F_INFLIGHT)) {
416 req->flags |= REQ_F_INFLIGHT;
426 req->flags &= ~REQ_F_ARM_LTIMEOUT;
427 req->flags |= REQ_F_LINK_TIMEOUT;
437 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
449 if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
458 if (!(req->flags & REQ_F_CREDS)) {
459 req->flags |= REQ_F_CREDS;
464 req->work.flags = 0;
465 if (req->flags & REQ_F_FORCE_ASYNC)
466 req->work.flags |= IO_WQ_WORK_CONCURRENT;
468 if (req->file && !(req->flags & REQ_F_FIXED_FILE))
469 req->flags |= io_file_get_flags(req->file);
471 if (req->file && (req->flags & REQ_F_ISREG)) {
478 if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL))
482 req->work.flags |= IO_WQ_WORK_UNBOUND;
490 if (req->flags & REQ_F_LINK_TIMEOUT) {
522 req->work.flags |= IO_WQ_WORK_CANCEL;
681 if (ctx->flags & IORING_SETUP_CQE32)
773 bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
799 ocqe->cqe.flags = cflags;
811 req->cqe.res, req->cqe.flags,
843 if (ctx->flags & IORING_SETUP_CQE32) {
870 WRITE_ONCE(cqe->flags, cflags);
872 if (ctx->flags & IORING_SETUP_CQE32) {
928 if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) {
935 if (!(req->flags & REQ_F_CQE_SKIP)) {
1015 req->flags &= ~REQ_F_REFCOUNT;
1017 req->flags |= REQ_F_CQE_SKIP;
1041 if (unlikely(req->flags & IO_DISARM_MASK))
1052 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1147 if (unlikely(current->flags & PF_EXITING)) {
1178 static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
1191 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
1192 flags &= ~IOU_F_TWQ_LAZY_WAKE;
1213 if (!(flags & IOU_F_TWQ_LAZY_WAKE))
1230 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1255 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1259 if (ctx->flags & IORING_SETUP_SQPOLL) {
1273 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
1275 if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
1277 io_req_local_work_add(req, flags);
1305 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1319 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1380 if (unlikely(req->task->flags & PF_EXITING))
1382 else if (req->flags & REQ_F_FORCE_ASYNC)
1417 if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
1418 if (req->flags & REQ_F_REFCOUNT) {
1423 if ((req->flags & REQ_F_POLLED) && req->apoll) {
1430 req->flags &= ~REQ_F_POLLED;
1432 if (req->flags & IO_REQ_LINK_FLAGS)
1434 if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
1457 if (!(req->flags & REQ_F_CQE_SKIP) &&
1490 if (!(ctx->flags & IORING_SETUP_IOPOLL))
1638 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
1664 req->flags |= REQ_F_ASYNC_DATA;
1728 if (req->flags & REQ_F_FIXED_FILE)
1745 if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
1773 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
1792 if (req->flags & IO_REQ_LINK_FLAGS)
1808 if (!(req->flags & REQ_F_REFCOUNT))
1816 if (work->flags & IO_WQ_WORK_CANCEL) {
1823 work->flags |= IO_WQ_WORK_CANCEL;
1835 if (req->flags & REQ_F_APOLL_MULTISHOT) {
1846 req->flags &= ~REQ_F_APOLL_MULTISHOT;
1850 if (req->flags & REQ_F_FORCE_ASYNC) {
1868 if (req->flags & REQ_F_NOWAIT)
1877 if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
1912 req->flags |= io_slot_flags(slot);
1936 if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
1978 if (unlikely(req->flags & REQ_F_FAIL)) {
1983 req->flags &= ~REQ_F_HARDLINK;
1984 req->flags |= REQ_F_LINK;
1995 * Check SQE restrictions (opcode and flags).
2031 head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2055 sqe_flags = READ_ONCE(sqe->flags);
2056 req->flags = (io_req_flags_t) sqe_flags;
2089 req->flags |= REQ_F_FORCE_ASYNC;
2094 req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2100 if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
2132 req->flags |= REQ_F_CREDS;
2154 if (head && !(head->flags & REQ_F_FAIL))
2157 if (!(req->flags & IO_REQ_LINK_FLAGS)) {
2200 if (req->flags & IO_REQ_LINK_FLAGS)
2205 if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
2208 } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
2210 if (req->flags & IO_REQ_LINK_FLAGS) {
2277 if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) {
2300 if (ctx->flags & IORING_SETUP_SQE128)
2336 !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
2487 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
2538 if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
2561 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
2588 if (ctx->flags & IORING_SETUP_CQE32) {
2599 if (ctx->flags & IORING_SETUP_NO_SQARRAY) {
2815 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
2882 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
2994 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
3016 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
3025 if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
3138 static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
3140 if (flags & IORING_ENTER_EXT_ARG) {
3151 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
3161 if (!(flags & IORING_ENTER_EXT_ARG)) {
3184 u32, min_complete, u32, flags, const void __user *, argp,
3191 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
3200 if (flags & IORING_ENTER_REGISTERED_RING) {
3220 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
3229 if (ctx->flags & IORING_SETUP_SQPOLL) {
3234 if (flags & IORING_ENTER_SQ_WAKEUP)
3236 if (flags & IORING_ENTER_SQ_WAIT)
3251 if (flags & IORING_ENTER_GETEVENTS) {
3258 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
3264 if (flags & IORING_ENTER_GETEVENTS) {
3276 ret2 = io_validate_ext_arg(flags, argp, argsz);
3287 ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
3310 if (!(flags & IORING_ENTER_REGISTERED_RING))
3348 if (!(ctx->flags & IORING_SETUP_NO_MMAP))
3357 if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
3364 if (p->flags & IORING_SETUP_SQE128)
3373 if (!(ctx->flags & IORING_SETUP_NO_MMAP))
3421 if (!(p->flags & IORING_SETUP_CLAMP))
3426 if ((p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
3427 && !(p->flags & IORING_SETUP_NO_MMAP))
3439 if (p->flags & IORING_SETUP_CQSIZE) {
3448 if (!(p->flags & IORING_SETUP_CLAMP))
3463 if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
3464 !(ctx->flags & IORING_SETUP_IOPOLL) &&
3465 !(ctx->flags & IORING_SETUP_SQPOLL))
3468 if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL))
3484 if (ctx->flags & IORING_SETUP_IOPOLL &&
3485 !(ctx->flags & IORING_SETUP_SQPOLL))
3497 if (ctx->flags & IORING_SETUP_SQPOLL) {
3498 /* IPI related flags don't make sense with SQPOLL */
3499 if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
3504 } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
3507 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
3508 !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
3518 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
3519 !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
3548 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
3550 if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
3553 if (!(ctx->flags & IORING_SETUP_NO_MMAP))
3562 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
3564 if (!(ctx->flags & IORING_SETUP_NO_MMAP))
3581 if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
3582 && !(ctx->flags & IORING_SETUP_R_DISABLED))
3600 if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
3607 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
3634 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
3688 BUILD_BUG_SQE_ELEM(1, __u8, flags);
3745 BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof_field(struct io_kiocb, flags));
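
The match at source line 26 is the kernel-side half of the SQPOLL wakeup handshake: after publishing a new SQ tail, userspace must re-read the SQ ring flags for IORING_SQ_NEED_WAKEUP and, if the poller thread has gone to sleep, call io_uring_enter(2) with IORING_ENTER_SQ_WAKEUP (the branches at source lines 3229-3236 above handle that path). A minimal userspace sketch of the check, assuming `ring_fd` is an IORING_SETUP_SQPOLL ring and `sq_flags` points at the mmap'ed SQ flags word (`sq_off.flags` into the rings region); `submit_and_wake` is an illustrative helper, not a liburing API:

```c
#include <linux/io_uring.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

/*
 * Hypothetical helper: `sq_flags` is assumed to point at the kernel-
 * shared SQ flags word mapped from sq_off.flags.
 */
static void submit_and_wake(int ring_fd, unsigned *sq_flags)
{
	unsigned enter_flags = 0;

	/*
	 * Order the tail store before the flags load; this is the
	 * userspace side of the barrier pairing the comment at source
	 * line 26 describes.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	/* Only the SQPOLL thread sets IORING_SQ_NEED_WAKEUP. */
	if (atomic_load_explicit((_Atomic unsigned *)sq_flags,
				 memory_order_relaxed) & IORING_SQ_NEED_WAKEUP)
		enter_flags |= IORING_ENTER_SQ_WAKEUP;

	if (enter_flags)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			enter_flags, NULL, 0);
}
```

With SQPOLL, no io_uring_enter() call is needed at all while the poller is awake; the syscall is issued only when the flags word shows the poller idled out, which is why the ordering of "publish tail, then read flags" matters.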