Searched refs:uring_lock (Results 1 - 19 of 19) sorted by relevance
/linux-master/io_uring/ |
H A D | tctx.c | 22 mutex_lock(&ctx->uring_lock); 27 mutex_unlock(&ctx->uring_lock); 34 mutex_unlock(&ctx->uring_lock); 129 mutex_lock(&ctx->uring_lock); 131 mutex_unlock(&ctx->uring_lock); 169 mutex_lock(&node->ctx->uring_lock); 171 mutex_unlock(&node->ctx->uring_lock); 191 * uring_lock) to avoid race with io_uring_try_cancel_iowq(). 264 mutex_unlock(&ctx->uring_lock); 266 mutex_lock(&ctx->uring_lock); [all...] |
H A D | io_uring.h | 121 lockdep_assert_held(&ctx->uring_lock); 232 lockdep_assert_held(&ctx->uring_lock); 234 mutex_unlock(&ctx->uring_lock); 241 * "Normal" inline submissions always hold the uring_lock, since we 247 mutex_lock(&ctx->uring_lock); 248 lockdep_assert_held(&ctx->uring_lock); 342 lockdep_assert_held(&ctx->uring_lock); 347 * Protected by ->uring_lock and can only be used either with 351 __must_hold(&req->ctx->uring_lock) 355 lockdep_assert_held(&req->ctx->uring_lock); [all...] |
H A D | register.c | 42 lockdep_is_held(&ctx->uring_lock)); 77 lockdep_is_held(&ctx->uring_lock)); 262 mutex_unlock(&ctx->uring_lock); 264 mutex_lock(&ctx->uring_lock); 309 __must_hold(&ctx->uring_lock) 327 * Observe the correct sqd->lock -> ctx->uring_lock 328 * ordering. Fine to drop uring_lock here, we hold 332 mutex_unlock(&ctx->uring_lock); 334 mutex_lock(&ctx->uring_lock); 391 __releases(ctx->uring_lock) [all...] |
H A D | notif.h | 36 __must_hold(&notif->ctx->uring_lock)
|
H A D | msg_ring.c | 38 mutex_unlock(&octx->uring_lock); 50 if (!mutex_trylock(&octx->uring_lock)) 54 mutex_lock(&octx->uring_lock); 109 * holding the uring_lock for posting completions. Other ring 114 mutex_lock(&target_ctx->uring_lock); 118 mutex_unlock(&target_ctx->uring_lock);
|
H A D | io_uring.c | 252 mutex_lock(&ctx->uring_lock); 256 mutex_unlock(&ctx->uring_lock); 322 mutex_init(&ctx->uring_lock); 675 lockdep_assert_held(&ctx->uring_lock); 716 mutex_lock(&ctx->uring_lock); 718 mutex_unlock(&ctx->uring_lock); 904 lockdep_assert_held(&ctx->uring_lock); 926 * the submitter task context, IOPOLL protects with uring_lock. 949 __must_hold(&ctx->uring_lock) 953 lockdep_assert_held(&req->ctx->uring_lock); [all...] |
H A D | rsrc.h | 80 lockdep_assert_held(&ctx->uring_lock); 95 lockdep_assert_held(&ctx->uring_lock);
|
H A D | cancel.c | 250 /* fixed must be grabbed every time since we drop the uring_lock */ 265 __must_hold(&ctx->uring_lock) 327 mutex_unlock(&ctx->uring_lock); 339 mutex_lock(&ctx->uring_lock); 343 mutex_lock(&ctx->uring_lock);
|
H A D | notif.c | 108 __must_hold(&ctx->uring_lock)
|
H A D | filetable.c | 65 __must_hold(&req->ctx->uring_lock)
|
H A D | waitid.c | 126 lockdep_assert_held(&req->ctx->uring_lock); 194 lockdep_assert_held(&ctx->uring_lock);
|
H A D | fdinfo.c | 79 * since we get cached_sq_head and cached_cq_tail without uring_lock 145 has_lock = mutex_trylock(&ctx->uring_lock); 213 mutex_unlock(&ctx->uring_lock);
|
H A D | kbuf.c | 37 lockdep_assert_held(&ctx->uring_lock); 48 * always under the ->uring_lock, but the RCU lookup from mmap does. 79 * ctx->uring_lock. If we already hold this lock, add back to this 94 lockdep_assert_held(&req->ctx->uring_lock); 314 lockdep_assert_held(&ctx->uring_lock); 360 lockdep_assert_held(&ctx->uring_lock); 496 * Completions that don't happen inline (eg not under uring_lock) will 674 lockdep_assert_held(&ctx->uring_lock); 733 lockdep_assert_held(&ctx->uring_lock); 787 * the uring_lock [all...] |
H A D | rsrc.c | 179 __must_hold(&node->ctx->uring_lock) 223 /* As We may drop ->uring_lock, other task may have started quiesce */ 248 mutex_unlock(&ctx->uring_lock); 252 mutex_lock(&ctx->uring_lock); 260 mutex_lock(&ctx->uring_lock); 478 lockdep_assert_held(&ctx->uring_lock); 688 * Quiesce may unlock ->uring_lock, and while it's not held 794 * Quiesce may unlock ->uring_lock, and while it's not held
|
H A D | sqpoll.c | 185 mutex_lock(&ctx->uring_lock); 196 mutex_unlock(&ctx->uring_lock);
|
H A D | futex.c | 151 lockdep_assert_held(&ctx->uring_lock);
|
H A D | uring_cmd.c | 56 lockdep_assert_held(&ctx->uring_lock);
|
H A D | poll.c | 148 lockdep_assert_held(&req->ctx->uring_lock); 159 * ->cancel_table_locked is protected by ->uring_lock in 607 /* io-wq doesn't hold uring_lock */ 794 __must_hold(&ctx->uring_lock) 975 * If sqpoll or single issuer, there is no contention for ->uring_lock
|
/linux-master/include/linux/ |
H A D | io_uring_types.h | 200 /* inline/task_work completion list, under ->uring_lock */ 257 struct mutex uring_lock; member in struct:io_ring_ctx::__anon2236 277 * uring_lock, and updated through io_uring_register(2) 283 * ->iopoll_list is protected by the ctx->uring_lock for 380 /* protected by ->uring_lock */ 542 /* hashed into ->cancel_hash_locked, protected by ->uring_lock */
|
Completed in 706 milliseconds