Lines matching defs:sqd (struct io_sq_data, io_uring SQPOLL code); the leading numbers are source line numbers

29 void io_sq_thread_unpark(struct io_sq_data *sqd)
30 __releases(&sqd->lock)
32 WARN_ON_ONCE(sqd->thread == current);
38 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
39 if (atomic_dec_return(&sqd->park_pending))
40 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
41 mutex_unlock(&sqd->lock);
44 void io_sq_thread_park(struct io_sq_data *sqd)
45 __acquires(&sqd->lock)
47 WARN_ON_ONCE(sqd->thread == current);
49 atomic_inc(&sqd->park_pending);
50 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
51 mutex_lock(&sqd->lock);
52 if (sqd->thread)
53 wake_up_process(sqd->thread);
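
Read together, lines 29-53 are the park/unpark hand-off used throughout this file: io_sq_thread_park() bumps park_pending, raises IO_SQ_THREAD_SHOULD_PARK, wakes the SQPOLL thread and takes sqd->lock; the thread reacts by dropping sqd->lock (see io_sqd_handle_event() at lines 210-224), and io_sq_thread_unpark() only leaves the bit clear once the last pending parker is done. The trace below is a hedged illustration of that counting, not code from the file; the single-caller pattern appears verbatim later in the listing (lines 94-97 and 440-445).

    /* Hedged trace: tasks A and B park the same sqd concurrently. */
    io_sq_thread_park(sqd);     /* A: park_pending 0 -> 1, SHOULD_PARK set,
                                 *    A holds sqd->lock; B calls park too,
                                 *    park_pending 1 -> 2, blocks on the lock */
    io_sq_thread_unpark(sqd);   /* A: clears SHOULD_PARK, atomic_dec_return()
                                 *    returns 1, so the bit is set again
                                 *    before the unlock; B now owns the lock  */
    io_sq_thread_unpark(sqd);   /* B: atomic_dec_return() returns 0, the bit
                                 *    stays clear and the thread resumes      */
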
56 void io_sq_thread_stop(struct io_sq_data *sqd)
58 WARN_ON_ONCE(sqd->thread == current);
59 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
61 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
62 mutex_lock(&sqd->lock);
63 if (sqd->thread)
64 wake_up_process(sqd->thread);
65 mutex_unlock(&sqd->lock);
66 wait_for_completion(&sqd->exited);
69 void io_put_sq_data(struct io_sq_data *sqd)
71 if (refcount_dec_and_test(&sqd->refs)) {
72 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
74 io_sq_thread_stop(sqd);
75 kfree(sqd);
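
io_sq_thread_stop() and io_put_sq_data() are the teardown half of the lifetime: the stopper sets IO_SQ_THREAD_SHOULD_STOP, wakes the thread, then blocks in wait_for_completion(&sqd->exited) until the thread runs complete(&sqd->exited) at line 379. The WARN_ON_ONCE() on line 59 records that stop may only happen once, which holds because it is only reached from the final put. A hedged sketch of that final-reference path, assembled from the lines above:

    /* Hedged sketch: the last io_put_sq_data() stops the SQPOLL thread
     * before freeing the shared io_sq_data.
     */
    if (refcount_dec_and_test(&sqd->refs)) {
            io_sq_thread_stop(sqd);    /* SHOULD_STOP + wake_up_process(),
                                        * then wait_for_completion(&sqd->exited) */
            kfree(sqd);                /* safe: the thread has fully exited      */
    }
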
79 static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
84 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
86 sqd->sq_thread_idle = sq_thread_idle;
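
The body of io_sqd_update_thread_idle() is elided by the listing; in mainline it takes the maximum sq_thread_idle across every ctx attached to the sqd, so the shared thread only goes idle once the most patient ring would. A hedged reconstruction (local names assumed):

    static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
    {
            struct io_ring_ctx *ctx;
            unsigned sq_thread_idle = 0;            /* assumed local */

            /* pick the largest idle period of all attached rings */
            list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                    sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
            sqd->sq_thread_idle = sq_thread_idle;
    }
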
91 struct io_sq_data *sqd = ctx->sq_data;
93 if (sqd) {
94 io_sq_thread_park(sqd);
96 io_sqd_update_thread_idle(sqd);
97 io_sq_thread_unpark(sqd);
99 io_put_sq_data(sqd);
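
Lines 91-99 are the ctx-side detach (io_sq_thread_finish() in mainline) and show the canonical park/modify/unpark pattern. The two statements the listing hides are the ones that actually unlink the ctx; a hedged reconstruction with those marked:

    struct io_sq_data *sqd = ctx->sq_data;

    if (sqd) {
            io_sq_thread_park(sqd);
            list_del_init(&ctx->sqd_list);     /* elided in the listing (assumed) */
            io_sqd_update_thread_idle(sqd);
            io_sq_thread_unpark(sqd);

            io_put_sq_data(sqd);
            ctx->sq_data = NULL;               /* elided in the listing (assumed) */
    }
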
107 struct io_sq_data *sqd;
119 sqd = ctx_attach->sq_data;
120 if (!sqd) {
124 if (sqd->task_tgid != current->tgid) {
129 refcount_inc(&sqd->refs);
131 return sqd;
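
The attach path (lines 107-131) is listed without its error returns. The caller at lines 141-148 tells us the shape: io_attach_sq_data() returns ERR_PTR() values, and only -EPERM (another thread group already owns the sqd) makes the caller fall through to set up a fresh sqd. A hedged sketch of the two checks above, with assumed error codes filled in:

    sqd = ctx_attach->sq_data;
    if (!sqd)
            return ERR_PTR(-EINVAL);   /* assumed code: target ring has no sqd    */
    if (sqd->task_tgid != current->tgid)
            return ERR_PTR(-EPERM);    /* confirmed by the fall-through comment
                                        * at line 146                             */

    refcount_inc(&sqd->refs);
    return sqd;
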
137 struct io_sq_data *sqd;
141 sqd = io_attach_sq_data(p);
142 if (!IS_ERR(sqd)) {
144 return sqd;
146 /* fall through for EPERM case, setup new sqd/task */
147 if (PTR_ERR(sqd) != -EPERM)
148 return sqd;
151 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
152 if (!sqd)
155 atomic_set(&sqd->park_pending, 0);
156 refcount_set(&sqd->refs, 1);
157 INIT_LIST_HEAD(&sqd->ctx_list);
158 mutex_init(&sqd->lock);
159 init_waitqueue_head(&sqd->wait);
160 init_completion(&sqd->exited);
161 return sqd;
164 static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
166 return READ_ONCE(sqd->state);
210 static bool io_sqd_handle_event(struct io_sq_data *sqd)
215 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
217 mutex_unlock(&sqd->lock);
221 mutex_lock(&sqd->lock);
222 sqd->sq_cpu = raw_smp_processor_id();
224 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
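
io_sqd_handle_event() is where the SQPOLL thread actually honours a park or stop request: when SHOULD_PARK is set or a signal is pending it drops sqd->lock, which is exactly what lets the parker's mutex_lock() in io_sq_thread_park() go through, then re-acquires the lock and refreshes sq_cpu in case it migrated while off-CPU. The hidden lines consume any pending signal; a hedged reconstruction (did_sig/ksig are assumed to match mainline):

    static bool io_sqd_handle_event(struct io_sq_data *sqd)
    {
            bool did_sig = false;
            struct ksignal ksig;

            if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
                signal_pending(current)) {
                    mutex_unlock(&sqd->lock);       /* parker proceeds here */
                    if (signal_pending(current))
                            did_sig = get_signal(&ksig);
                    cond_resched();
                    mutex_lock(&sqd->lock);         /* wait for the unpark  */
                    sqd->sq_cpu = raw_smp_processor_id();
            }
            return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
    }
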
256 static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start)
264 sqd->work_time += end.ru_stime.tv_usec + end.ru_stime.tv_sec * 1000000;
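
Line 264 shows sqd->work_time accumulating microseconds of system time. The elided lines take a second getrusage() snapshot and subtract the *start sample that io_sq_thread() captures before each submission pass (it is passed in at line 317). A hedged reconstruction:

    static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start)
    {
            struct rusage end;

            getrusage(current, RUSAGE_SELF, &end);
            end.ru_stime.tv_sec  -= start->ru_stime.tv_sec;
            end.ru_stime.tv_usec -= start->ru_stime.tv_usec;

            /* accumulate system time spent submitting, in microseconds */
            sqd->work_time += end.ru_stime.tv_usec + end.ru_stime.tv_sec * 1000000;
    }
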
270 struct io_sq_data *sqd = data;
281 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
285 sqd->task_pid = current->pid;
287 if (sqd->sq_cpu != -1) {
288 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
291 sqd->sq_cpu = raw_smp_processor_id();
294 mutex_lock(&sqd->lock);
298 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
299 if (io_sqd_handle_event(sqd))
301 timeout = jiffies + sqd->sq_thread_idle;
304 cap_entries = !list_is_singular(&sqd->ctx_list);
306 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
317 io_sq_update_worktime(sqd, &start);
318 timeout = jiffies + sqd->sq_thread_idle;
321 mutex_unlock(&sqd->lock);
323 mutex_lock(&sqd->lock);
324 sqd->sq_cpu = raw_smp_processor_id();
329 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
330 if (!io_sqd_events_pending(sqd) && !io_sq_tw_pending(retry_list)) {
333 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
355 mutex_unlock(&sqd->lock);
357 mutex_lock(&sqd->lock);
358 sqd->sq_cpu = raw_smp_processor_id();
360 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
365 finish_wait(&sqd->wait, &wait);
366 timeout = jiffies + sqd->sq_thread_idle;
372 io_uring_cancel_generic(true, sqd);
373 sqd->thread = NULL;
374 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
377 mutex_unlock(&sqd->lock);
379 complete(&sqd->exited);
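
Lines 270-379 are the SQPOLL thread itself. The outline below is a hedged reconstruction built only from the visible lines plus mainline knowledge; the per-ctx submission, the ring need-wakeup flags, and the need_resched() yield at lines 321-324 are reduced to comments, and sqt_spin/needs_sched are assumed locals. Every mutex_unlock()/mutex_lock() pair in the loop is a point where a parker can get in, which is why sq_cpu is refreshed after each one.

    /* Hedged outline of io_sq_thread(); elided pieces are marked.
     * Locals from the real function (buf, timeout, start, wait, retry_list,
     * ctx) are not declared here.
     */
    snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);  /* thread comm */
    sqd->task_pid = current->pid;                             /* now our pid */
    if (sqd->sq_cpu != -1)
            set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
    else
            sqd->sq_cpu = raw_smp_processor_id();

    mutex_lock(&sqd->lock);
    while (1) {
            bool cap_entries, sqt_spin = false;               /* assumed locals */

            if (io_sqd_events_pending(sqd) || signal_pending(current)) {
                    if (io_sqd_handle_event(sqd))
                            break;              /* stop request or fatal signal */
                    timeout = jiffies + sqd->sq_thread_idle;
            }

            cap_entries = !list_is_singular(&sqd->ctx_list);
            list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
                    /* submit pending SQEs, run task work; set sqt_spin on
                     * progress (elided) */
            }

            if (sqt_spin || !time_after(jiffies, timeout)) {
                    if (sqt_spin) {
                            io_sq_update_worktime(sqd, &start);
                            timeout = jiffies + sqd->sq_thread_idle;
                    }
                    continue;        /* (elided: unlock/relock at 321-324 when a
                                      * reschedule is needed)                    */
            }

            /* idle: tell userspace it must wake us, then sleep */
            prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
            if (!io_sqd_events_pending(sqd) && !io_sq_tw_pending(retry_list)) {
                    bool needs_sched = true;                  /* assumed local */

                    list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
                            /* set the ring's need-wakeup flag; clear needs_sched
                             * if SQEs or iopoll work already arrived (elided)   */
                    }
                    if (needs_sched) {
                            mutex_unlock(&sqd->lock);
                            schedule();
                            mutex_lock(&sqd->lock);
                            sqd->sq_cpu = raw_smp_processor_id();
                    }
                    list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                            ;        /* clear the need-wakeup flag (elided)      */
            }
            finish_wait(&sqd->wait, &wait);
            timeout = jiffies + sqd->sq_thread_idle;
    }

    io_uring_cancel_generic(true, sqd);
    sqd->thread = NULL;
    list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
            ;                        /* mark rings as needing wakeups (elided)   */
    mutex_unlock(&sqd->lock);
    complete(&sqd->exited);
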
421 struct io_sq_data *sqd;
428 sqd = io_get_sq_data(p, &attached);
429 if (IS_ERR(sqd)) {
430 ret = PTR_ERR(sqd);
435 ctx->sq_data = sqd;
440 io_sq_thread_park(sqd);
441 list_add(&ctx->sqd_list, &sqd->ctx_list);
442 io_sqd_update_thread_idle(sqd);
444 ret = (attached && !sqd->thread) ? -ENXIO : 0;
445 io_sq_thread_unpark(sqd);
458 sqd->sq_cpu = cpu;
460 sqd->sq_cpu = -1;
463 sqd->task_pid = current->pid;
464 sqd->task_tgid = current->tgid;
465 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
471 sqd->thread = tsk;
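
Lines 421-471 are the setup side (io_sq_offload_create() in mainline): grab or attach an sqd, park it, add the ctx to ctx_list, recompute the idle period, refuse to attach to a thread that has already exited (-ENXIO at line 444), pin to a requested CPU or leave sq_cpu at -1, then spawn io_sq_thread() with create_io_thread(). The sq_cpu and sq_thread_idle inputs come from userspace via io_uring_setup(2); the snippet below is an illustrative userspace example of those knobs, not code from this file.

    /* Illustrative userspace setup that feeds the fields used above. */
    #include <linux/io_uring.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int setup_sqpoll_ring(unsigned int entries)
    {
            struct io_uring_params p;

            memset(&p, 0, sizeof(p));
            p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
            p.sq_thread_cpu  = 2;     /* ends up in sqd->sq_cpu (line 458)        */
            p.sq_thread_idle = 1000;  /* ms of idling before the iou-sqp thread
                                       * sleeps; feeds sqd->sq_thread_idle        */
            /* to share an existing SQPOLL thread instead, set
             * IORING_SETUP_ATTACH_WQ and p.wq_fd (io_attach_sq_data() path)      */
            return syscall(__NR_io_uring_setup, entries, &p);
    }
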
493 struct io_sq_data *sqd = ctx->sq_data;
496 if (sqd) {
497 io_sq_thread_park(sqd);
499 if (sqd->thread)
500 ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
501 io_sq_thread_unpark(sqd);
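
The final fragment (lines 493-501, io_sqpoll_wq_cpu_affinity() in mainline) shows why parking matters beyond ctx_list updates: holding the park stabilises sqd->thread, so the affinity update cannot race with the thread tearing itself down (sqd->thread = NULL at line 373). A hedged reconstruction of the whole helper; the initial error value is an assumption:

    __cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
                                         cpumask_var_t mask)
    {
            struct io_sq_data *sqd = ctx->sq_data;
            int ret = -EINVAL;                      /* assumed default */

            if (sqd) {
                    io_sq_thread_park(sqd);
                    /* don't touch a thread that is already gone */
                    if (sqd->thread)
                            ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
                    io_sq_thread_unpark(sqd);
            }

            return ret;
    }
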