Lines Matching defs:worker

775 void __kthread_init_worker(struct kthread_worker *worker,
779 memset(worker, 0, sizeof(struct kthread_worker));
780 raw_spin_lock_init(&worker->lock);
781 lockdep_set_class_and_name(&worker->lock, key, name);
782 INIT_LIST_HEAD(&worker->work_list);
783 INIT_LIST_HEAD(&worker->delayed_work_list);
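
For orientation, a minimal sketch of how a caller would set up a worker through the public kthread_init_worker() wrapper around this helper; the my_worker and my_setup names are hypothetical:

#include <linux/kthread.h>

static struct kthread_worker my_worker;	/* illustrative worker instance */

static int my_setup(void)
{
	/* Zeroes the struct and sets up the lock and both work lists. */
	kthread_init_worker(&my_worker);
	return 0;
}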
791 * This function implements the main cycle of a kthread worker. It processes
799 * Also, the works must not be handled by more than one worker at the same time,
804 struct kthread_worker *worker = worker_ptr;
809 * worker users are created using kthread_create_worker*() functions.
811 WARN_ON(worker->task && worker->task != current);
812 worker->task = current;
814 if (worker->flags & KTW_FREEZABLE)
822 raw_spin_lock_irq(&worker->lock);
823 worker->task = NULL;
824 raw_spin_unlock_irq(&worker->lock);
829 raw_spin_lock_irq(&worker->lock);
830 if (!list_empty(&worker->work_list)) {
831 work = list_first_entry(&worker->work_list,
835 worker->current_work = work;
836 raw_spin_unlock_irq(&worker->lock);
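
A hedged sketch of the legacy pattern kthread_worker_fn() supports: initialize a worker, then spawn the thread by hand and point it at this function (everything except the kernel APIs is an illustrative name):

static int my_start(void)
{
	struct task_struct *task;

	kthread_init_worker(&my_worker);
	/* The thread loops in kthread_worker_fn() until kthread_stop(). */
	task = kthread_run(kthread_worker_fn, &my_worker, "my-worker");
	if (IS_ERR(task))
		return PTR_ERR(task);
	return 0;
}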
861 struct kthread_worker *worker;
865 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
866 if (!worker)
869 kthread_init_worker(worker);
874 task = __kthread_create_on_node(kthread_worker_fn, worker,
882 worker->flags = flags;
883 worker->task = task;
885 return worker;
888 kfree(worker);
893 * kthread_create_worker - create a kthread worker
894 * @flags: flags modifying the default behavior of the worker
895 * @namefmt: printf-style name for the kthread worker (task).
897 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
904 struct kthread_worker *worker;
908 worker = __kthread_create_worker(-1, flags, namefmt, args);
911 return worker;
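
A minimal usage sketch, assuming the kthread_create_worker(flags, namefmt, ...) signature shown above; the worker variable and my_create are hypothetical:

static struct kthread_worker *worker;

static int my_create(void)
{
	/* 0: no KTW_FREEZABLE; the name becomes the task comm. */
	worker = kthread_create_worker(0, "my-worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);
	return 0;
}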
916 * kthread_create_worker_on_cpu - create a kthread worker and bind it
919 * @flags: flags modifying the default behavior of the worker
920 * @namefmt: printf-style name for the kthread worker (task).
922 * Use a valid CPU number if you want to bind the kthread worker
925 * A good practice is to also include the CPU number in the worker name.
929 * The kthread worker API is simple and generic. It just provides a way
939 * - The worker might not exist when the CPU was off when the user
943 * destroy/create the worker when the CPU goes down/up.
946 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
954 struct kthread_worker *worker;
958 worker = __kthread_create_worker(cpu, flags, namefmt, args);
961 return worker;
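
Following the naming advice above, a sketch of creating a pinned worker, assuming the (cpu, flags, namefmt, ...) signature; my_create_pinned is a hypothetical helper and the caller checks IS_ERR() on the result:

static struct kthread_worker *my_create_pinned(int cpu)
{
	/* Bound to @cpu; the CPU number is encoded in the task name. */
	return kthread_create_worker_on_cpu(cpu, 0, "my-worker/%d", cpu);
}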
967 * It happens when the work is already pending in a worker list
970 static inline bool queuing_blocked(struct kthread_worker *worker,
973 lockdep_assert_held(&worker->lock);
978 static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
981 lockdep_assert_held(&worker->lock);
983 /* Do not use a work with >1 worker, see kthread_queue_work() */
984 WARN_ON_ONCE(work->worker && work->worker != worker);
987 /* insert @work before @pos in @worker */
988 static void kthread_insert_work(struct kthread_worker *worker,
992 kthread_insert_work_sanity_check(worker, work);
994 trace_sched_kthread_work_queue_work(worker, work);
997 work->worker = worker;
998 if (!worker->current_work && likely(worker->task))
999 wake_up_process(worker->task);
1004 * @worker: target kthread_worker
1011 * Reinitialize the work if it needs to be used by another worker.
1012 * For example, when the worker was stopped and started again.
1014 bool kthread_queue_work(struct kthread_worker *worker,
1020 raw_spin_lock_irqsave(&worker->lock, flags);
1021 if (!queuing_blocked(worker, work)) {
1022 kthread_insert_work(worker, work, &worker->work_list);
1025 raw_spin_unlock_irqrestore(&worker->lock, flags);
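
A sketch of defining and queueing a work item against a worker created earlier; my_work_fn, my_work, and my_queue are hypothetical names:

static void my_work_fn(struct kthread_work *work)
{
	/* Runs in the worker thread's context, one work at a time. */
}

static DEFINE_KTHREAD_WORK(my_work, my_work_fn);

static void my_queue(struct kthread_worker *worker)
{
	/* Returns false when the work is already pending or being cancelled. */
	if (!kthread_queue_work(worker, &my_work))
		pr_debug("my_work already pending\n");
}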
1042 struct kthread_worker *worker = work->worker;
1049 if (WARN_ON_ONCE(!worker))
1052 raw_spin_lock_irqsave(&worker->lock, flags);
1053 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1054 WARN_ON_ONCE(work->worker != worker);
1056 /* Move the work from worker->delayed_work_list. */
1060 kthread_insert_work(worker, work, &worker->work_list);
1062 raw_spin_unlock_irqrestore(&worker->lock, flags);
1066 static void __kthread_queue_delayed_work(struct kthread_worker *worker,
1082 kthread_insert_work(worker, work, &worker->work_list);
1087 kthread_insert_work_sanity_check(worker, work);
1089 list_add(&work->node, &worker->delayed_work_list);
1090 work->worker = worker;
1098 * @worker: target kthread_worker
1110 bool kthread_queue_delayed_work(struct kthread_worker *worker,
1118 raw_spin_lock_irqsave(&worker->lock, flags);
1120 if (!queuing_blocked(worker, work)) {
1121 __kthread_queue_delayed_work(worker, dwork, delay);
1125 raw_spin_unlock_irqrestore(&worker->lock, flags);
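
The delayed variant follows the same shape; a sketch with hypothetical names, arming the work to run roughly 100 ms later:

static void my_dwork_fn(struct kthread_work *work)
{
	/* Runs in the worker thread once the timer has fired. */
}

static DEFINE_KTHREAD_DELAYED_WORK(my_dwork, my_dwork_fn);

static void my_queue_delayed(struct kthread_worker *worker)
{
	kthread_queue_delayed_work(worker, &my_dwork, msecs_to_jiffies(100));
}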
1154 struct kthread_worker *worker;
1157 worker = work->worker;
1158 if (!worker)
1161 raw_spin_lock_irq(&worker->lock);
1162 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1163 WARN_ON_ONCE(work->worker != worker);
1166 kthread_insert_work(worker, &fwork.work, work->node.next);
1167 else if (worker->current_work == work)
1168 kthread_insert_work(worker, &fwork.work,
1169 worker->work_list.next);
1173 raw_spin_unlock_irq(&worker->lock);
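
Usage is a single call per work item, so the caller waits only for that work; reusing the hypothetical my_work from above:

/* Block until a pending or running my_work has finished. */
kthread_flush_work(&my_work);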
1184 * The function is called under worker->lock. The lock is temporary
1192 struct kthread_worker *worker = work->worker;
1201 raw_spin_unlock_irqrestore(&worker->lock, *flags);
1203 raw_spin_lock_irqsave(&worker->lock, *flags);
1208 * This function removes the work from the worker queue.
1210 * It is called under worker->lock. The caller must make sure that
1215 * current_work processed by the worker.
1223 * Try to remove the work from a worker list. It might either
1224 * be from worker->work_list or from worker->delayed_work_list.
1236 * @worker: kthread worker to use
1257 bool kthread_mod_delayed_work(struct kthread_worker *worker,
1265 raw_spin_lock_irqsave(&worker->lock, flags);
1268 if (!work->worker) {
1273 /* Work must not be used with >1 worker, see kthread_queue_work() */
1274 WARN_ON_ONCE(work->worker != worker);
1283 * The timer must be canceled first because worker->lock is released
1297 __kthread_queue_delayed_work(worker, dwork, delay);
1299 raw_spin_unlock_irqrestore(&worker->lock, flags);
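
A sketch of the typical re-arm pattern; unlike kthread_queue_delayed_work(), this also restarts an already pending timer (my_dwork as above):

/* Restart the countdown, or queue the work if it was idle. */
kthread_mod_delayed_work(worker, &my_dwork, msecs_to_jiffies(500));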
1306 struct kthread_worker *worker = work->worker;
1310 if (!worker)
1313 raw_spin_lock_irqsave(&worker->lock, flags);
1314 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1315 WARN_ON_ONCE(work->worker != worker);
1322 if (worker->current_work != work)
1330 raw_spin_unlock_irqrestore(&worker->lock, flags);
1332 raw_spin_lock_irqsave(&worker->lock, flags);
1336 raw_spin_unlock_irqrestore(&worker->lock, flags);
1352 * The caller must ensure that the worker on which @work was last
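
The public wrappers around this helper cancel synchronously; a sketch using the hypothetical items from the earlier examples:

/* Safe even while the callback runs; returns once it has finished. */
kthread_cancel_work_sync(&my_work);
kthread_cancel_delayed_work_sync(&my_dwork);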
1380 * @worker: worker to flush
1382 * Wait until all currently executing or pending works on @worker are
1385 void kthread_flush_worker(struct kthread_worker *worker)
1392 kthread_queue_work(worker, &fwork.work);
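
Usage is a single call; the function queues a barrier work behind everything else and sleeps until the worker reaches it:

/* Wait for everything executing or queued on @worker right now. */
kthread_flush_worker(worker);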
1398 * kthread_destroy_worker - destroy a kthread worker
1399 * @worker: worker to be destroyed
1401 * Flush and destroy @worker. The simple flush is enough because the kthread
1402 * worker API is used only in trivial scenarios. There are no multi-step state
1409 void kthread_destroy_worker(struct kthread_worker *worker)
1413 task = worker->task;
1417 kthread_flush_worker(worker);
1419 WARN_ON(!list_empty(&worker->delayed_work_list));
1420 WARN_ON(!list_empty(&worker->work_list));
1421 kfree(worker);
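
A sketch of the matching teardown path: stop new queueing first, then destroy, which flushes the remaining works, stops the thread, and frees the worker (my_dwork is the hypothetical item from the earlier sketches):

kthread_cancel_delayed_work_sync(&my_dwork);
kthread_destroy_worker(worker);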