Lines matching refs: ctx in the io_uring registration code (apparently io_uring/register.c); each entry is the source line number followed by the matching line, and non-matching lines are omitted.

34 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
41 ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
42 lockdep_is_held(&ctx->uring_lock));
60 spin_lock(&ctx->completion_lock);
61 ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
62 spin_unlock(&ctx->completion_lock);
65 ctx->has_evfd = true;
66 rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
72 int io_eventfd_unregister(struct io_ring_ctx *ctx)
76 ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
77 lockdep_is_held(&ctx->uring_lock));
79 ctx->has_evfd = false;
80 rcu_assign_pointer(ctx->io_ev_fd, NULL);
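The matches above are io_eventfd_register()/io_eventfd_unregister(), the kernel side of the IORING_REGISTER_EVENTFD and IORING_UNREGISTER_EVENTFD opcodes: the eventfd context is published with rcu_assign_pointer() under ctx->uring_lock, and the CQ tail snapshot is taken under completion_lock. A minimal userspace sketch using liburing's wrappers; the ring, efd, and helper names here are illustrative, not from this source, and error handling is trimmed:

/* Attach an eventfd that is signalled when CQEs are posted, then detach it.
 * io_uring_register_eventfd() reaches io_eventfd_register() above. */
#include <unistd.h>
#include <sys/eventfd.h>
#include <liburing.h>

static int attach_cq_eventfd(struct io_uring *ring)
{
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;
	if (io_uring_register_eventfd(ring, efd)) {
		close(efd);
		return -1;
	}
	return efd;
}

static void detach_cq_eventfd(struct io_uring *ring, int efd)
{
	io_uring_unregister_eventfd(ring);	/* io_eventfd_unregister() */
	close(efd);
}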
89 static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
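io_probe() implements IORING_REGISTER_PROBE, which reports the opcodes the running kernel supports. A userspace sketch using liburing's probe helpers; the opcode checked is only an example:

/* Ask the kernel which SQE opcodes it supports; io_uring_get_probe()
 * issues IORING_REGISTER_PROBE, handled by io_probe() above. */
#include <stdio.h>
#include <liburing.h>

static void report_sendmsg_support(void)
{
	struct io_uring_probe *probe = io_uring_get_probe();

	if (!probe)
		return;
	printf("IORING_OP_SENDMSG supported: %d\n",
	       io_uring_opcode_supported(probe, IORING_OP_SENDMSG));
	io_uring_free_probe(probe);
}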
129 int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
133 creds = xa_erase(&ctx->personalities, id);
143 static int io_register_personality(struct io_ring_ctx *ctx)
151 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
152 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
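io_register_personality()/io_unregister_personality() store and erase credentials in the ctx->personalities xarray, backing IORING_REGISTER_PERSONALITY and IORING_UNREGISTER_PERSONALITY. A userspace sketch (the ring is assumed to be initialized; the NOP request only shows where the returned id goes):

/* Register the calling task's credentials and issue one request with them. */
#include <liburing.h>

static int submit_with_personality(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;
	int id = io_uring_register_personality(ring);	/* personality id, or -errno */

	if (id < 0)
		return id;
	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -1;
	io_uring_prep_nop(sqe);
	sqe->personality = id;		/* run this SQE with the registered creds */
	io_uring_submit(ring);

	return io_uring_unregister_personality(ring, id);
}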
160 static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
168 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
172 if (ctx->restrictions.registered)
197 ctx->restrictions.register_op);
205 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
208 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
211 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
222 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
224 ctx->restrictions.registered = true;
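io_register_restrictions() is only allowed while the ring is still IORING_SETUP_R_DISABLED and can be applied once (restrictions.registered). A userspace sketch that restricts a disabled ring to a couple of operations via liburing's io_uring_register_restrictions(); the specific opcodes allowed here are only an example:

/* On a ring created with IORING_SETUP_R_DISABLED, allow only buffer
 * registration and fixed reads before the ring is enabled. */
#include <liburing.h>

static int restrict_ring(struct io_uring *ring)
{
	struct io_uring_restriction res[2] = { };

	res[0].opcode = IORING_RESTRICTION_REGISTER_OP;
	res[0].register_op = IORING_REGISTER_BUFFERS;
	res[1].opcode = IORING_RESTRICTION_SQE_OP;
	res[1].sqe_op = IORING_OP_READ_FIXED;

	return io_uring_register_restrictions(ring, res, 2);
}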
230 static int io_register_enable_rings(struct io_ring_ctx *ctx)
232 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
235 if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task) {
236 WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
241 if (wq_has_sleeper(&ctx->poll_wq))
242 io_activate_pollwq(ctx);
245 if (ctx->restrictions.registered)
246 ctx->restricted = 1;
248 ctx->flags &= ~IORING_SETUP_R_DISABLED;
249 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
250 wake_up(&ctx->sq_data->wait);
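io_register_enable_rings() clears IORING_SETUP_R_DISABLED, pins the submitter task for IORING_SETUP_SINGLE_ISSUER rings, activates the poll waitqueue if anyone is sleeping on it, latches any registered restrictions, and wakes a sleeping SQPOLL thread. A userspace sketch of the intended sequence (queue depth and flags are arbitrary):

/* Start disabled, optionally restrict, then enable;
 * io_uring_enable_rings() reaches io_register_enable_rings() above. */
#include <liburing.h>

static int start_restricted_ring(struct io_uring *ring)
{
	int ret = io_uring_queue_init(8, ring, IORING_SETUP_R_DISABLED);

	if (ret)
		return ret;
	/* io_uring_register_restrictions() would go here, see the sketch above. */
	return io_uring_enable_rings(ring);
}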
254 static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
259 if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
262 mutex_unlock(&ctx->uring_lock);
263 ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
264 mutex_lock(&ctx->uring_lock);
270 static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
297 ret = __io_register_iowq_aff(ctx, new_mask);
302 static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
304 return __io_register_iowq_aff(ctx, NULL);
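io_register_iowq_aff() copies in a CPU mask and hands it to __io_register_iowq_aff(), which applies it to the task's io-wq or, for IORING_SETUP_SQPOLL rings, drops uring_lock and forwards it to io_sqpoll_wq_cpu_affinity(); io_unregister_iowq_aff() simply passes a NULL mask. A userspace sketch pinning workers to CPU 0 via liburing (the CPU choice is arbitrary):

/* Restrict this ring's async workers to CPU 0;
 * reaches io_register_iowq_aff() above. */
#define _GNU_SOURCE
#include <sched.h>
#include <liburing.h>

static int pin_workers_to_cpu0(struct io_uring *ring)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	return io_uring_register_iowq_aff(ring, sizeof(mask), &mask);
}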
307 static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
309 __must_hold(&ctx->uring_lock)
323 if (ctx->flags & IORING_SETUP_SQPOLL) {
324 sqd = ctx->sq_data;
327 * Observe the correct sqd->lock -> ctx->uring_lock
329 * a ref to the ctx.
332 mutex_unlock(&ctx->uring_lock);
334 mutex_lock(&ctx->uring_lock);
342 BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
346 ctx->iowq_limits[i] = new_count[i];
347 ctx->iowq_limits_set = true;
370 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
377 new_count[i] = ctx->iowq_limits[i];
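io_register_iowq_max_workers() copies in two limits (bounded and unbounded io-wq work), caches them in ctx->iowq_limits, applies them to the tasks attached via ctx->tctx_list (taking sqd->lock in the documented sqd->lock -> uring_lock order for SQPOLL rings), and returns the previous limits to userspace. A liburing sketch; the limits chosen are arbitrary:

/* Cap async workers: counts[0] = bounded, counts[1] = unbounded. A zero
 * entry leaves that limit unchanged, and the previous limits are written
 * back into the array. Reaches io_register_iowq_max_workers() above. */
#include <liburing.h>

static int cap_iowq_workers(struct io_uring *ring)
{
	unsigned int counts[2] = { 4, 16 };

	return io_uring_register_iowq_max_workers(ring, counts);
}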
390 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
392 __releases(ctx->uring_lock)
393 __acquires(ctx->uring_lock)
401 if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
404 if (ctx->submitter_task && ctx->submitter_task != current)
407 if (ctx->restricted) {
409 if (!test_bit(opcode, ctx->restrictions.register_op))
418 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
424 ret = io_sqe_buffers_unregister(ctx);
430 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
436 ret = io_sqe_files_unregister(ctx);
439 ret = io_register_files_update(ctx, arg, nr_args);
445 ret = io_eventfd_register(ctx, arg, 0);
451 ret = io_eventfd_register(ctx, arg, 1);
457 ret = io_eventfd_unregister(ctx);
463 ret = io_probe(ctx, arg, nr_args);
469 ret = io_register_personality(ctx);
475 ret = io_unregister_personality(ctx, nr_args);
481 ret = io_register_enable_rings(ctx);
484 ret = io_register_restrictions(ctx, arg, nr_args);
487 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
490 ret = io_register_rsrc_update(ctx, arg, nr_args,
494 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
497 ret = io_register_rsrc_update(ctx, arg, nr_args,
504 ret = io_register_iowq_aff(ctx, arg, nr_args);
510 ret = io_unregister_iowq_aff(ctx);
516 ret = io_register_iowq_max_workers(ctx, arg);
519 ret = io_ringfd_register(ctx, arg, nr_args);
522 ret = io_ringfd_unregister(ctx, arg, nr_args);
528 ret = io_register_pbuf_ring(ctx, arg);
534 ret = io_unregister_pbuf_ring(ctx, arg);
540 ret = io_sync_cancel(ctx, arg);
546 ret = io_register_file_alloc_range(ctx, arg);
552 ret = io_register_pbuf_status(ctx, arg);
558 ret = io_register_napi(ctx, arg);
564 ret = io_unregister_napi(ctx, arg);
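The __io_uring_register() switch above is the single dispatch point for every registration opcode: after the dying-ref, submitter-task, and restriction checks, each IORING_(UN)REGISTER_* case funnels into one of the helpers listed earlier. Every liburing io_uring_register_*() wrapper ultimately issues the io_uring_register(2) syscall to get here; a raw-syscall sketch, where ring_fd is assumed to come from io_uring_setup(2) and the opcode is just an example:

/* Raw form of a registration call, bypassing liburing. */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int raw_unregister_eventfd(int ring_fd)
{
	/* IORING_UNREGISTER_EVENTFD takes no argument payload. */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_UNREGISTER_EVENTFD, NULL, 0);
}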
577 struct io_ring_ctx *ctx;
610 ctx = file->private_data;
612 mutex_lock(&ctx->uring_lock);
613 ret = __io_uring_register(ctx, opcode, arg, nr_args);
614 mutex_unlock(&ctx->uring_lock);
615 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
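The final matches are the io_uring_register(2) syscall entry itself: it resolves the ring from file->private_data, wraps __io_uring_register() in ctx->uring_lock, and emits the trace_io_uring_register tracepoint with the resulting file and buffer counts. One practical consequence is that registrations on the same ring are serialized; a small liburing sketch issuing two back-to-back registrations (the fd and iovec are placeholders):

/* Two separate io_uring_register(2) calls, each serialized on
 * ctx->uring_lock by the entry path above. */
#include <sys/uio.h>
#include <liburing.h>

static int register_resources(struct io_uring *ring, int fd,
			      struct iovec *iov)
{
	int ret = io_uring_register_files(ring, &fd, 1);

	if (ret)
		return ret;
	return io_uring_register_buffers(ring, iov, 1);
}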