Lines matching refs: tctx

86 #include "tctx.h"
398 struct io_uring_task *tctx = req->task->io_uring;
400 atomic_dec(&tctx->inflight_tracked);
504 struct io_uring_task *tctx = req->task->io_uring;
506 BUG_ON(!tctx);
507 BUG_ON(!tctx->io_wq);
523 io_wq_enqueue(tctx->io_wq, &req->work);
736 struct io_uring_task *tctx = task->io_uring;
738 percpu_counter_sub(&tctx->inflight, 1);
739 if (unlikely(atomic_read(&tctx->in_cancel)))
740 wake_up(&tctx->wait);
759 void io_task_refs_refill(struct io_uring_task *tctx)
761 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
763 percpu_counter_add(&tctx->inflight, refill);
765 tctx->cached_refs += refill;
770 struct io_uring_task *tctx = task->io_uring;
771 unsigned int refs = tctx->cached_refs;
774 tctx->cached_refs = 0;
775 percpu_counter_sub(&tctx->inflight, refs);
1228 static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
1230 struct llist_node *node = llist_del_all(&tctx->task_list);
1256 struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
1263 io_fallback_tw(tctx, true);
1267 node = llist_del_all(&tctx->task_list);
1274 if (unlikely(atomic_read(&tctx->in_cancel)))
1277 trace_io_uring_task_work_run(tctx, *count);
1283 struct io_uring_task *tctx;
1287 tctx = container_of(cb, struct io_uring_task, task_work);
1288 ret = tctx_task_work_run(tctx, UINT_MAX, &count);
1363 struct io_uring_task *tctx = req->task->io_uring;
1367 if (!llist_add(&req->io_task_work.node, &tctx->task_list))
1382 if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
1385 io_fallback_tw(tctx, false);
1961 /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
2543 struct io_uring_task *tctx = current->io_uring;
2545 if (!tctx)
2547 return percpu_counter_read_positive(&tctx->inflight);
3022 struct io_uring_task *tctx = current->io_uring;
3029 * tctx can be NULL if the queueing of this task_work raced with
3032 if (tctx && !atomic_read(&tctx->in_cancel))
3228 struct io_uring_task *tctx = node->task->io_uring;
3234 if (!tctx || !tctx->io_wq)
3236 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
3280 struct io_uring_task *tctx = task ? task->io_uring : NULL;
3296 } else if (tctx && tctx->io_wq) {
3301 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
3332 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
3335 return atomic_read(&tctx->inflight_tracked);
3336 return percpu_counter_sum(&tctx->inflight);
3345 struct io_uring_task *tctx = current->io_uring;
3356 if (tctx->io_wq)
3357 io_wq_exit_start(tctx->io_wq);
3359 atomic_inc(&tctx->in_cancel);
3365 inflight = tctx_inflight(tctx, !cancel_all);
3370 xa_for_each(&tctx->xa, index, node) {
3389 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
3392 xa_for_each(&tctx->xa, index, node) {
3404 if (inflight == tctx_inflight(tctx, !cancel_all))
3407 finish_wait(&tctx->wait, &wait);
3410 io_uring_clean_tctx(tctx);
3416 atomic_dec(&tctx->in_cancel);
3417 /* for exec all current's requests should be gone, kill tctx */
3621 struct io_uring_task *tctx = current->io_uring;
3623 if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
3626 file = tctx->registered_rings[fd];
3838 struct io_uring_task *tctx;
4017 tctx = current->io_uring;
4024 ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX);
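Taken together, the io_task_refs_refill() and io_uring_drop_tctx_refs() fragments above (lines 759-775) show the per-task reference cache: the task batches request references in tctx->cached_refs and only touches the shared tctx->inflight percpu counter when the cache is refilled or returned. Below is a minimal userspace sketch of that arithmetic, not kernel code; the task_refs struct, the CACHE_NR constant standing in for IO_TCTX_REFS_CACHE_NR, and the plain long standing in for the percpu counter are illustrative assumptions.

/*
 * Userspace sketch of the cached-reference pattern, assuming a single
 * thread; the real code uses a percpu counter for inflight.
 */
#include <stdio.h>

#define CACHE_NR 1024                 /* stands in for IO_TCTX_REFS_CACHE_NR */

struct task_refs {
	int  cached_refs;             /* fast-path cache, no shared updates   */
	long inflight;                /* stands in for the percpu counter     */
};

/* Mirrors io_task_refs_refill(): top the cache back up to CACHE_NR. */
static void refs_refill(struct task_refs *t)
{
	unsigned int refill = -t->cached_refs + CACHE_NR;

	t->inflight    += refill;     /* one shared update per refill         */
	t->cached_refs += refill;     /* cache now holds exactly CACHE_NR     */
}

/* Mirrors io_uring_drop_tctx_refs(): return any unused cached refs. */
static void refs_drop(struct task_refs *t)
{
	unsigned int refs = t->cached_refs;

	if (refs) {
		t->cached_refs = 0;
		t->inflight  -= refs;
	}
}

int main(void)
{
	struct task_refs t = { 0, 0 };

	refs_refill(&t);              /* cached_refs = 1024, inflight = 1024  */
	t.cached_refs -= 3;           /* three requests consume cached refs   */
	refs_drop(&t);                /* unused cache returned                */
	printf("inflight after drop: %ld\n", t.inflight);  /* prints 3       */
	return 0;
}

In this sketch a refill tops cached_refs up to exactly CACHE_NR while charging the same amount to inflight, so dropping the unused cache leaves inflight equal to the number of references actually consumed; the shared counter is updated once per refill/drop instead of once per request.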