Lines Matching refs:work (io_uring's io-wq.c; the number opening each entry is the source line number in io-wq.c)

31 	IO_WORKER_F_BOUND	= 3,	/* is doing bounded work */
64 struct work_struct work;
160 struct io_wq_work *work)
162 return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND));
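These two fragments are io_work_get_acct(): each work item is routed to one of two accounting buckets, bounded or unbounded, based on its IO_WQ_WORK_UNBOUND flag, so the two classes (e.g. regular file I/O versus network I/O that can block indefinitely) keep separate worker limits and cannot starve each other. A minimal userspace model of just the routing; the flag value and bucket indices are taken from io-wq.h/io-wq.c, and the struct bodies are reduced to stubs:

#include <stdbool.h>

#define IO_WQ_WORK_UNBOUND 4    /* value from io-wq.h */

enum {
    IO_WQ_ACCT_BOUND,
    IO_WQ_ACCT_UNBOUND,
    IO_WQ_ACCT_NR,
};

struct io_wq_work { unsigned int flags; };
struct io_wq_acct { int nr_workers; };    /* stub */
struct io_wq { struct io_wq_acct acct[IO_WQ_ACCT_NR]; };

static struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
{
    return &wq->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

/* a set IO_WQ_WORK_UNBOUND flag selects the unbounded bucket */
static struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
                                           struct io_wq_work *work)
{
    return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND));
}

int main(void)
{
    struct io_wq wq = { 0 };
    struct io_wq_work w = { .flags = IO_WQ_WORK_UNBOUND };

    return io_work_get_acct(&wq, &w) == &wq.acct[IO_WQ_ACCT_UNBOUND] ? 0 : 1;
}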
251 * If there's work to do, returns true with acct->lock acquired. If not,
290 * starting work or finishing work. In either case, if it does
291 * go to sleep, we'll kick off a new task for this work anyway.
308 * Most likely an attempt to queue unbounded work on an io_wq that
390 * work item after we canceled in io_wq_exit_workers().
427 * Worker will start processing some work. Move it to the busy list, if
441 * No work, worker going to sleep. Move to freelist.
452 static inline unsigned int io_get_work_hash(struct io_wq_work *work)
454 return work->flags >> IO_WQ_HASH_SHIFT;
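io_get_work_hash() is the decode half of the packing performed by io_wq_hash_work() (line 985, further down in this listing): the hash key lives in the bits of work->flags above IO_WQ_HASH_SHIFT, clear of the regular IO_WQ_WORK_* flag bits. A standalone round-trip check, with the constants taken from io-wq.h:

#include <assert.h>

enum {
    IO_WQ_WORK_HASHED = 2,    /* values from io-wq.h */
    IO_WQ_HASH_SHIFT  = 24,   /* upper bits hold the hash key */
};

struct io_wq_work { unsigned int flags; };

static unsigned int io_get_work_hash(struct io_wq_work *work)
{
    return work->flags >> IO_WQ_HASH_SHIFT;
}

int main(void)
{
    struct io_wq_work work = { .flags = 0 };
    unsigned int bit = 0x5a;    /* stand-in for a hash_ptr() result */

    /* the encode side, mirroring line 985 */
    work.flags |= IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT);
    assert(io_get_work_hash(&work) == bit);
    return 0;
}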
479 struct io_wq_work *work, *tail;
486 work = container_of(node, struct io_wq_work, list);
489 if (!io_wq_is_hashed(work)) {
491 return work;
494 hash = io_get_work_hash(work);
495 /* all items with this hash lie in [work, tail] */
502 return work;
515 * work being added and clearing the stalled bit.
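Lines 479-502 are the heart of io_get_next_work(): unhashed work is runnable immediately, while hashed work is serialized so that only one worker runs a given hash at a time, and the hash_tail[] array lets the scan claim or skip an entire same-hash run in a single step. A condensed userspace model of that policy; the raw pointer list and the hash_map bitmask are simplified stand-ins for the kernel's wq_list helpers and test_and_set_bit():

#include <stddef.h>

#define NR_HASH 8

struct work {
    int hash;               /* -1 when not hashed */
    struct work *next;
};

struct queue {
    struct work *head;
    struct work *hash_tail[NR_HASH];
    unsigned long hash_map; /* one bit per in-flight hash */
};

static struct work *get_next_work(struct queue *q)
{
    struct work **pprev = &q->head;

    while (*pprev) {
        struct work *w = *pprev;

        if (w->hash < 0) {              /* not hashed, can run anytime */
            *pprev = w->next;
            return w;
        }
        if (!(q->hash_map & (1UL << w->hash))) {
            struct work *tail = q->hash_tail[w->hash];

            /* claim the hash; the whole run [w, tail] comes off */
            q->hash_map |= 1UL << w->hash;
            q->hash_tail[w->hash] = NULL;
            *pprev = tail->next;
            tail->next = NULL;
            return w;
        }
        /* hash already running: fast-forward past the run */
        pprev = &q->hash_tail[w->hash]->next;
    }
    return NULL;                        /* only stalled hashed work left */
}

int main(void)
{
    struct work a = { .hash = 0 }, b = { .hash = 0 }, c = { .hash = -1 };
    struct queue q = { .head = &a };

    a.next = &b;
    b.next = &c;
    q.hash_tail[0] = &b;        /* [a, b] is the hash-0 run */
    q.hash_map = 1UL << 0;      /* pretend hash 0 is already running... */

    return get_next_work(&q) == &c ? 0 : 1;    /* ...so the scan skips to c */
}

The fast-forward via hash_tail[] is why a stalled hash doesn't cost a walk over its whole run: the scan jumps straight past it to the next candidate.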
532 struct io_wq_work *work)
534 if (work) {
540 worker->cur_work = work;
555 struct io_wq_work *work;
558 * If we got some work, mark us as busy. If we didn't, but
559 * the list isn't empty, it means we stalled on hashed work.
560 * Mark us stalled so we don't keep looking for work when we
561 * can't make progress, any work completion or insertion will
564 work = io_get_next_work(acct, worker);
565 if (work) {
568 * it becomes the active work. That avoids a window
569 * where the work has been removed from our general
570 * work list, but isn't yet discoverable as the
571 * current work item for this worker.
574 worker->cur_work = work;
580 if (!work)
585 io_assign_current_work(worker, work);
591 unsigned int hash = io_get_work_hash(work);
593 next_hashed = wq_next_work(work);
595 if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
596 work->flags |= IO_WQ_WORK_CANCEL;
597 wq->do_work(work);
600 linked = wq->free_work(work);
601 work = next_hashed;
602 if (!work && linked && !io_wq_is_hashed(linked)) {
603 work = linked;
606 io_assign_current_work(worker, work);
619 } while (work);
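Lines 591-619 are the inner execution loop of io_worker_handle_work(): after an item runs, free_work() may hand back a linked (dependent) item, which is executed inline only when it is unhashed and no same-hash successor (next_hashed) is pending; otherwise it goes back through the normal queue. A condensed model, with do_work/free_work as stubbed stand-ins for the wq->do_work and wq->free_work hooks:

#include <stdbool.h>
#include <stdio.h>

struct work {
    const char *name;
    bool hashed;
    struct work *next;    /* same-hash successor (next_hashed) */
    struct work *link;    /* dependent item returned by free_work() */
};

static void do_work(struct work *w)
{
    printf("ran %s\n", w->name);
}

static struct work *free_work(struct work *w)
{
    return w->link;       /* completion hands back the linked item */
}

static void enqueue(struct work *w)
{
    printf("re-enqueued %s\n", w->name);
}

static void handle_work(struct work *work)
{
    do {
        struct work *next_hashed = work->next;
        struct work *linked;

        do_work(work);
        linked = free_work(work);
        work = next_hashed;
        if (!work && linked && !linked->hashed) {
            work = linked;      /* run the dependent item inline */
            linked = NULL;
        }
        if (linked)
            enqueue(linked);    /* back through the normal queue */
    } while (work);
}

int main(void)
{
    struct work dep  = { .name = "linked" };
    struct work head = { .name = "head", .link = &dep };

    handle_work(&head);    /* prints: ran head / ran linked */
    return 0;
}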
647 * If we have work to do, io_acct_run_queue() returns with
710 * running and we have work pending, wake up a free one or create a new one.
742 static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
806 schedule_work(&worker->work);
809 static void io_workqueue_create(struct work_struct *work)
811 struct io_worker *worker = container_of(work, struct io_worker, work);
852 INIT_WORK(&worker->work, io_workqueue_create);
853 schedule_work(&worker->work);
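Lines 806-853 are the deferred worker-creation path: when a new io worker can't be forked directly, creation is bounced to a kernel workqueue via INIT_WORK()/schedule_work(), and io_workqueue_create() recovers the io_worker from the embedded work_struct (the field at line 64) with container_of(). A userspace model of that embed-and-recover pattern; work_struct and the container_of() macro here are local stand-ins for the kernel's:

#include <stddef.h>
#include <stdio.h>

/* local stand-in for the kernel's work_struct machinery */
struct work_struct {
    void (*func)(struct work_struct *);
};

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct io_worker {
    int id;
    struct work_struct work;    /* embedded, as at line 64 */
};

static void io_workqueue_create(struct work_struct *work)
{
    struct io_worker *worker = container_of(work, struct io_worker, work);

    printf("creating worker %d from workqueue context\n", worker->id);
}

int main(void)
{
    struct io_worker worker = { .id = 1 };

    /* INIT_WORK() + schedule_work(), collapsed into a direct call */
    worker.work.func = io_workqueue_create;
    worker.work.func(&worker.work);
    return 0;
}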
891 static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
894 work->flags |= IO_WQ_WORK_CANCEL;
895 wq->do_work(work);
896 work = wq->free_work(work);
897 } while (work);
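io_run_cancel() (lines 891-897) disposes of a work item and everything linked behind it by running the chain with IO_WQ_WORK_CANCEL set: do_work() sees the flag and fails the request instead of executing it, and free_work() returns the next linked item until the chain is empty. A minimal standalone model, with the two hooks stubbed:

#include <stdio.h>

#define IO_WQ_WORK_CANCEL 1    /* value from io-wq.h */

struct io_wq_work {
    unsigned int flags;
    struct io_wq_work *link;   /* stand-in for the linked item */
};

/* stand-in for wq->do_work: a flagged request fails instead of running */
static void do_work(struct io_wq_work *work)
{
    if (work->flags & IO_WQ_WORK_CANCEL)
        printf("completed with -ECANCELED\n");
}

/* stand-in for wq->free_work: completion hands back the linked item */
static struct io_wq_work *free_work(struct io_wq_work *work)
{
    return work->link;
}

static void io_run_cancel(struct io_wq_work *work)
{
    do {
        work->flags |= IO_WQ_WORK_CANCEL;
        do_work(work);
        work = free_work(work);
    } while (work);
}

int main(void)
{
    struct io_wq_work second = { 0 };
    struct io_wq_work first = { .link = &second };

    io_run_cancel(&first);    /* cancels the whole chain */
    return 0;
}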
900 static void io_wq_insert_work(struct io_wq *wq, struct io_wq_work *work)
902 struct io_wq_acct *acct = io_work_get_acct(wq, work);
906 if (!io_wq_is_hashed(work)) {
908 wq_list_add_tail(&work->list, &acct->work_list);
912 hash = io_get_work_hash(work);
914 wq->hash_tail[hash] = work;
918 wq_list_add_after(&work->list, &tail->list, &acct->work_list);
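io_wq_insert_work() (lines 900-918) maintains the invariant quoted at line 495, that all items with one hash are contiguous: the first item of a hash is appended to the list tail and recorded in hash_tail[], and later same-hash items are spliced in right after the previous run tail rather than appended. A standalone model of that placement; the work and queue types are local stand-ins:

#include <stddef.h>

#define NR_HASH 4

struct work {
    int hash;               /* -1 when not hashed */
    struct work *next;
};

struct queue {
    struct work *head, *tail;
    struct work *hash_tail[NR_HASH];
};

static void insert_tail(struct queue *q, struct work *w)
{
    w->next = NULL;
    if (q->tail)
        q->tail->next = w;
    else
        q->head = w;
    q->tail = w;
}

static void insert_work(struct queue *q, struct work *w)
{
    struct work *tail;

    if (w->hash < 0) {          /* unhashed: plain tail append */
        insert_tail(q, w);
        return;
    }
    tail = q->hash_tail[w->hash];
    q->hash_tail[w->hash] = w;  /* w is the new end of the run */
    if (!tail) {                /* first of this hash */
        insert_tail(q, w);
        return;
    }
    /* splice after the old run tail, keeping the run contiguous */
    w->next = tail->next;
    tail->next = w;
    if (q->tail == tail)
        q->tail = w;
}

int main(void)
{
    struct work a = { .hash = 0 }, b = { .hash = -1 }, c = { .hash = 0 };
    struct queue q = { 0 };

    insert_work(&q, &a);    /* list: a */
    insert_work(&q, &b);    /* list: a, b */
    insert_work(&q, &c);    /* list: a, c, b -- c joins a's run */
    return (q.head == &a && a.next == &c && c.next == &b) ? 0 : 1;
}

This placement is what makes the claim at line 495 ("all items with this hash lie in [work, tail]") cheap for io_get_next_work() to rely on.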
921 static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
923 return work == data;
926 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
928 struct io_wq_acct *acct = io_work_get_acct(wq, work);
929 unsigned long work_flags = work->flags;
938 (work->flags & IO_WQ_WORK_CANCEL)) {
939 io_run_cancel(work, wq);
944 io_wq_insert_work(wq, work);
969 match.data = work,
980 void io_wq_hash_work(struct io_wq_work *work, void *val)
985 work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
990 struct io_wq_work *work)
992 if (work && match->fn(work, match->data)) {
993 work->flags |= IO_WQ_WORK_CANCEL;
1007 * may dereference the passed in work.
1018 struct io_wq_work *work,
1021 struct io_wq_acct *acct = io_work_get_acct(wq, work);
1022 unsigned int hash = io_get_work_hash(work);
1025 if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) {
1033 wq_list_del(&acct->work_list, &work->list, prev);
1041 struct io_wq_work *work;
1045 work = container_of(node, struct io_wq_work, list);
1046 if (!match->fn(work, match->data))
1048 io_wq_remove_pending(wq, work, prev);
1050 io_run_cancel(work, wq);
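Every cancellation path funnels through one predicate interface, a struct io_cb_cancel_data carrying an fn(work, data) callback; io_wq_work_match_all() (line 742) and io_wq_work_match_item() (lines 921-923) are the two stock predicates. Lines 1041-1050 apply it to the pending list: each match is unlinked by io_wq_remove_pending() and fed to io_run_cancel(). A condensed model of that sweep (the hash_tail fixup from line 1025 is omitted here):

#include <stdbool.h>
#include <stdio.h>

struct work { int id; struct work *next; };

struct cb_cancel_data {
    bool (*fn)(struct work *, void *);
    void *data;
    int nr_pending;
};

/* the two stock predicates, mirroring io_wq_work_match_item/_all */
static bool match_item(struct work *w, void *data)
{
    return w == data;
}

static bool match_all(struct work *w, void *data)
{
    (void)w; (void)data;
    return true;
}

static void cancel_pending(struct work **head, struct cb_cancel_data *m)
{
    struct work **pprev = head;

    while (*pprev) {
        struct work *w = *pprev;

        if (!m->fn(w, m->data)) {
            pprev = &w->next;
            continue;
        }
        *pprev = w->next;                       /* io_wq_remove_pending() */
        printf("canceled pending work %d\n", w->id);    /* io_run_cancel() */
        m->nr_pending++;
    }
}

int main(void)
{
    struct work a = { .id = 1 }, b = { .id = 2 };
    struct work *head = &a;
    struct cb_cancel_data one = { .fn = match_item, .data = &b };
    struct cb_cancel_data all = { .fn = match_all };

    a.next = &b;
    cancel_pending(&head, &one);    /* removes only b */
    cancel_pending(&head, &all);    /* sweeps the rest */
    return 0;
}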
1095 * from there. CANCEL_OK means that the work is returned as-new,
1098 * Then check if a free (going busy) or busy worker has the work
1104 * we'll find a work item regardless of state.
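The comment at lines 1095-1104 lays out the three-way outcome of cancellation: removed while still pending, flagged while running, or not found. The enum below is the real one from io-wq.h; the function wrapped around it is only a sketch of the decision order, and scan_pending()/signal_running() are hypothetical stand-ins for the two scans the comment describes:

#include <stdbool.h>

/* result space of io_wq_cancel_cb(), as declared in io-wq.h */
enum io_wq_cancel {
    IO_WQ_CANCEL_OK,        /* cancelled before started */
    IO_WQ_CANCEL_RUNNING,   /* found, running, and attempted cancelled */
    IO_WQ_CANCEL_NOTFOUND,  /* work not found */
};

/* hypothetical helpers standing in for the pending and running scans */
static bool scan_pending(void) { return false; }
static bool signal_running(void) { return false; }

static enum io_wq_cancel cancel_cb_model(void)
{
    if (scan_pending())     /* unlinked before it ran: returned as-new */
        return IO_WQ_CANCEL_OK;
    if (signal_running())   /* flagged; its completion still posts */
        return IO_WQ_CANCEL_RUNNING;
    return IO_WQ_CANCEL_NOTFOUND;
}

int main(void)
{
    return cancel_cb_model() == IO_WQ_CANCEL_NOTFOUND ? 0 : 1;
}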