Lines Matching defs:work (only in /barrelfish-2018-10-04/lib/devif/backends/net/mlx4/include/linux/)

94  struct work_struct	work;
105 to_delayed_work(struct work_struct *work)
108 return container_of(work, struct delayed_work, work);
112 #define INIT_WORK(work, func) \
114 (work)->fn = (func); \
115 INIT_LIST_HEAD(&(work)->list); \
121 INIT_WORK(&(_work)->work, func); \
127 #define schedule_work(work) \
129 (work)->taskqueue = taskqueue_thread; \
130 taskqueue_enqueue(taskqueue_thread, &(work)->work_task); \
137 struct work_struct *work) {
138 return enqueue(wq, work);
143 queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
148 work->work.taskqueue = wq->taskqueue;
150 pending = work->work.work_task.ta_pending;
152 pending = work->work.work_task.ta_pending;
153 callout_reset(&work->timer, delay, linux_delayed_work_fn, work);
155 callout_stop(&work->timer);
156 pending = taskqueue_enqueue(work->work.taskqueue,
157 &work->work.work_task);
231 cancel_work_sync(struct work_struct *work)
233 if (work->taskqueue &&
234 taskqueue_cancel(work->taskqueue, &work->work_task, NULL))
235 taskqueue_drain(work->taskqueue, &work->work_task);
240 * This may leave work running on another CPU as it does on Linux.
243 cancel_delayed_work(struct delayed_work *work)
246 callout_stop(&work->timer);
247 if (work->work.taskqueue)
248 return (taskqueue_cancel(work->work.taskqueue,
249 &work->work.work_task, NULL) == 0);
254 cancel_delayed_work_sync(struct delayed_work *work)
257 callout_drain(&work->timer);
258 if (work->work.taskqueue &&
259 taskqueue_cancel(work->work.taskqueue, &work->work.work_task, NULL))
260 taskqueue_drain(work->work.taskqueue, &work->work.work_task);
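
Read together, the matched lines outline a small Linux workqueue compatibility layer built on FreeBSD-style taskqueues and callouts: INIT_WORK records the handler and initializes the list head, schedule_work enqueues the work on the global taskqueue_thread queue, queue_delayed_work arms a callout that later enqueues the task, and the cancel routines map to callout_stop/callout_drain plus taskqueue_cancel/taskqueue_drain. The following is a minimal, hypothetical usage sketch under those assumptions; apart from the macro and function names, nothing in it comes from the matched lines.

/*
 * Minimal usage sketch for the workqueue shim excerpted above. Assumes the
 * header is included as <linux/workqueue.h>, that handlers use the usual
 * Linux work_func_t signature, and that INIT_DELAYED_WORK is the macro whose
 * body appears at line 121; handler names, the delay value, and example_wq
 * are illustrative only.
 */
#include <linux/workqueue.h>

static void
example_reset_task(struct work_struct *work)
{
	/* one-shot work: runs once on the backing taskqueue thread */
}

static void
example_poll_task(struct work_struct *work)
{
	/* to_delayed_work() (line 105) recovers the enclosing delayed_work */
	struct delayed_work *dwork = to_delayed_work(work);
	(void)dwork;
	/* periodic work would re-queue itself from here */
}

static struct work_struct	reset_work;
static struct delayed_work	poll_work;
static struct workqueue_struct	*example_wq;	/* assumed to be created
						 * elsewhere; no creation
						 * helper shows up in the
						 * matched lines */

static void
example_start(void)
{
	INIT_WORK(&reset_work, example_reset_task);
	INIT_DELAYED_WORK(&poll_work, example_poll_task);

	/* immediate work lands on the global taskqueue_thread queue (line 129) */
	schedule_work(&reset_work);

	/* delayed work arms the callout in queue_delayed_work() (line 153);
	 * the delay is handed straight to callout_reset(), so it is in
	 * callout ticks rather than jiffies */
	queue_delayed_work(example_wq, &poll_work, 100 /* illustrative */);
}

static void
example_stop(void)
{
	/* teardown mirrors the cancel paths matched at lines 231-260 */
	cancel_delayed_work_sync(&poll_work);
	cancel_work_sync(&reset_work);
}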