Lines Matching defs:work

40 * generic delayed work implementation which delays work execution until a
41 * particular vblank has passed, and then executes the work at realtime
45 * re-arming work items can be easily implemented.
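
The matched comment lines describe the core idea: a work item that is deferred until a given vblank has passed, is then handed to the CRTC's kthread worker, and can reschedule itself. A minimal self re-arming sketch follows; struct my_crtc_state and my_vblank_work_fn are hypothetical driver-side names, not part of this file.

#include <linux/kthread.h>
#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>

/* Hypothetical driver-side wrapper embedding the vblank work item. */
struct my_crtc_state {
        struct drm_crtc *crtc;
        struct drm_vblank_work work;
};

static void my_vblank_work_fn(struct kthread_work *kwork)
{
        struct drm_vblank_work *work =
                container_of(kwork, struct drm_vblank_work, base);
        struct my_crtc_state *state =
                container_of(work, struct my_crtc_state, work);

        /* ... time-sensitive programming for state->crtc goes here ... */

        /* Self re-arm: run again once the next vblank has passed. */
        drm_vblank_work_schedule(work,
                                 drm_crtc_vblank_count(state->crtc) + 1,
                                 true);
}

The matches further down show how such a function is registered (drm_vblank_work_init, line 242) and scheduled for the first time (drm_vblank_work_schedule, line 108).
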
50 struct drm_vblank_work *work, *next;
56 list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
57 if (!drm_vblank_passed(count, work->count))
60 list_del_init(&work->node);
62 kthread_queue_work(vblank->worker, &work->base);
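
Lines 50-62 are the vblank-side dispatch loop. The sketch below stitches the matched fragments into a readable whole; the function name, the count read and the final wake-up are assumptions inferred from the surrounding API rather than taken from the matches.

/* Reconstruction sketch, not a verbatim copy of the function. */
static void handle_vblank_works_sketch(struct drm_vblank_crtc *vblank)
{
        struct drm_vblank_work *work, *next;
        u64 count = atomic64_read(&vblank->count);      /* assumed count source */

        list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
                /* Target vblank not reached yet: keep it pending. */
                if (!drm_vblank_passed(count, work->count))
                        continue;

                /* Due: unlink and hand off to the per-CRTC kthread worker. */
                list_del_init(&work->node);
                kthread_queue_work(vblank->worker, &work->base);
        }

        /* Flushers sleep on this queue, see drm_vblank_work_flush below. */
        wake_up_all(&vblank->work_wait_queue);
}
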
69 /* Handle cancelling any pending vblank work items and drop respective vblank
74 struct drm_vblank_work *work, *next;
78 list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
79 list_del_init(&work->node);
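
Only two lines of the cancel path match, so most of the following is inference: the header comment at line 69 says the respective vblank references are dropped, which suggests a drm_vblank_put() paired with each unlink.

/* Assumed shape of the cancel loop; treat every detail as a guess. */
list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
        list_del_init(&work->node);
        drm_vblank_put(vblank->dev, vblank->pipe);      /* inferred pairing */
}
wake_up_all(&vblank->work_wait_queue);
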
87 * drm_vblank_work_schedule - schedule a vblank work
88 * @work: vblank work to schedule
92 * Schedule @work for execution once the crtc vblank count reaches @count.
95 * %false the work starts to execute immediately.
98 * %true the work is deferred until the next vblank (as if @count has been
101 * If @work is already scheduled, this function will reschedule said work
102 * using the new @count. This can be used for self-rearming work items.
105 * %1 if @work was successfully (re)scheduled, %0 if it was either already
108 int drm_vblank_work_schedule(struct drm_vblank_work *work,
111 struct drm_vblank_crtc *vblank = work->vblank;
119 if (work->cancelling)
128 if (list_empty(&work->node)) {
132 } else if (work->count == count) {
139 work->count = count;
149 ret = kthread_queue_work(vblank->worker, &work->base);
152 list_del_init(&work->node);
157 list_add_tail(&work->node, &vblank->pending_work);
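
From a driver's point of view the interesting part is simply "run this once vblank N has passed". A usage sketch reusing the hypothetical my_crtc_state from the first example; the third argument is assumed to be the defer-on-miss flag described at lines 95 and 98 (the parameter list at line 108 is truncated in the matches).

static int my_schedule_for_next_vblank(struct my_crtc_state *state)
{
        u64 target = drm_crtc_vblank_count(state->crtc) + 1;
        int ret;

        /*
         * Defer-on-miss false: if 'target' has somehow already passed,
         * run the work immediately rather than waiting one more frame.
         */
        ret = drm_vblank_work_schedule(&state->work, target, false);
        if (ret < 0)
                return ret;     /* scheduling failed, e.g. vblanks unavailable */

        /* Non-negative: (re)scheduled now, or already scheduled/cancelled. */
        return 0;
}
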
170 * drm_vblank_work_cancel_sync - cancel a vblank work and wait for it to
172 * @work: vblank work to cancel
174 * Cancel an already scheduled vblank work and wait for its
177 * On return, @work is guaranteed to no longer be scheduled or running, even
181 * %True if the work was cancelled before it started to execute, %false
184 bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work)
186 struct drm_vblank_crtc *vblank = work->vblank;
191 if (!list_empty(&work->node)) {
192 list_del_init(&work->node);
197 work->cancelling++;
202 if (kthread_cancel_work_sync(&work->base))
206 work->cancelling--;
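
A teardown-side usage sketch; the caller and the debug message are illustrative, only the cancel call and its return-value meaning come from the kernel-doc above.

#include <drm/drm_print.h>

static void my_crtc_teardown(struct my_crtc_state *state)
{
        /*
         * After this returns the work is neither scheduled nor running,
         * even if it is a self re-arming item like my_vblank_work_fn().
         */
        if (!drm_vblank_work_cancel_sync(&state->work))
                drm_dbg(state->crtc->dev,
                        "vblank work had already started executing\n");
}
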
214 * drm_vblank_work_flush - wait for a scheduled vblank work to finish
216 * @work: vblank work to flush
218 * Wait until @work has finished executing once.
220 void drm_vblank_work_flush(struct drm_vblank_work *work)
222 struct drm_vblank_crtc *vblank = work->vblank;
226 wait_event_lock_irq(vblank->work_wait_queue, list_empty(&work->node),
230 kthread_flush_work(&work->base);
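
The two matched lines show the flush happening in two stages: wait for the item to leave the pending list (work_wait_queue), then flush the underlying kthread work. On the caller side it is a single blocking call; the wrapper below is hypothetical.

static void my_wait_for_one_pass(struct my_crtc_state *state)
{
        /* Returns once the currently scheduled execution has completed. */
        drm_vblank_work_flush(&state->work);

        /* ... now safe to consume whatever my_vblank_work_fn() produced ... */
}
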
235 * drm_vblank_work_init - initialize a vblank work item
236 * @work: vblank work item
237 * @crtc: CRTC whose vblank will trigger the work execution
238 * @func: work function to be executed
240 * Initialize a vblank work item for a specific crtc.
242 void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
243 void (*func)(struct kthread_work *work))
245 kthread_init_work(&work->base, func);
246 INIT_LIST_HEAD(&work->node);
247 work->vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
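
Finally, tying the earlier sketches together at init time. The helper below is an assumption; what line 247 does show is that the item is bound to &crtc->dev->vblank[drm_crtc_index(crtc)], so the device's vblank support presumably has to be set up before this call.

static void my_crtc_init_vblank_work(struct my_crtc_state *state,
                                     struct drm_crtc *crtc)
{
        state->crtc = crtc;

        /* Bind the item to @crtc's vblank slot and its per-CRTC worker. */
        drm_vblank_work_init(&state->work, crtc, my_vblank_work_fn);
}
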