Searched refs:worker (Results 1 - 25 of 116) sorted by last modified time


/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_process.c
1950 /* Process termination destroys this worker thread. So during the
2001 /* Process termination destroys this worker thread. So during the
2200 struct send_exception_work_handler_workarea worker; local
2202 INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);
2204 worker.p = p;
2205 worker.queue_id = queue_id;
2206 worker.error_reason = error_reason;
2208 schedule_work(&worker.work);
2209 flush_work(&worker.work);
2210 destroy_work_on_stack(&worker.work);
[all...]
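
The kfd_process.c hits above show the on-stack work pattern: initialize a stack-allocated work item, schedule it, flush it before the frame unwinds, then destroy the debug object. A minimal generic sketch of that pattern (struct and function names are illustrative, not the amdkfd originals):

    #include <linux/workqueue.h>

    struct exception_work {
            struct work_struct work;
            int queue_id;
    };

    static void exception_work_handler(struct work_struct *work)
    {
            struct exception_work *w =
                    container_of(work, struct exception_work, work);

            pr_info("handling exception on queue %d\n", w->queue_id);
    }

    static void send_exception_sync(int queue_id)
    {
            struct exception_work w;

            INIT_WORK_ONSTACK(&w.work, exception_work_handler);
            w.queue_id = queue_id;
            schedule_work(&w.work);         /* run on the system workqueue */
            flush_work(&w.work);            /* must finish before the frame dies */
            destroy_work_on_stack(&w.work); /* pairs with INIT_WORK_ONSTACK */
    }
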
/linux-master/kernel/
workqueue.c
3 * kernel/workqueue.c - generic async execution with shared worker pool
19 * executed in process context. The worker pool is shared and
20 * automatically managed. There are two worker pools for each CPU (one for
88 /* worker flags */
93 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
94 WORKER_REBOUND = 1 << 8, /* worker was rebound */
151 * K: Only modified by worker while holding pool->lock. Can be safely read by
155 * S: Only modified by worker self.
180 /* struct worker is defined in workqueue_internal.h */
206 struct timer_list idle_timer; /* L: worker idle timeout */
981 worker_set_flags(struct worker *worker, unsigned int flags) argument
1003 worker_clr_flags(struct worker *worker, unsigned int flags) argument
1041 worker_enter_idle(struct worker *worker) argument
1074 worker_leave_idle(struct worker *worker) argument
1121 struct worker *worker; local
1186 assign_work(struct work_struct *work, struct worker *worker, struct work_struct **nextp) argument
1244 struct worker *worker = first_idle_worker(pool); local
1396 struct worker *worker = kthread_data(task); local
1430 struct worker *worker = kthread_data(task); local
1476 struct worker *worker = kthread_data(task); local
1543 struct worker *worker = kthread_data(task); local
2275 struct worker *worker; local
2357 struct worker *worker; local
2692 struct worker *worker; local
2722 worker_attach_to_pool(struct worker *worker, struct worker_pool *pool) argument
2756 worker_detach_from_pool(struct worker *worker) argument
2795 struct worker *worker; local
2866 unbind_worker(struct worker *worker) argument
2879 struct worker *worker, *tmp; local
2909 set_worker_dying(struct worker *worker, struct list_head *list) argument
2952 struct worker *worker; local
2995 struct worker *worker; local
3134 manage_workers(struct worker *worker) argument
3337 process_scheduled_works(struct worker *worker) argument
3376 struct worker *worker = __worker; local
3583 bh_worker(struct worker *worker) argument
3731 struct worker *worker; local
3784 insert_wq_barrier(struct pool_workqueue *pwq, struct wq_barrier *barr, struct work_struct *target, struct worker *worker) argument
4152 struct worker *worker = NULL; local
4779 struct worker *worker; local
5821 struct worker *worker = current_wq_worker(); local
5837 struct worker *worker = current_wq_worker(); local
5928 struct worker *worker = current_wq_worker(); local
5959 struct worker *worker; local
6001 pr_cont_worker_id(struct worker *worker) argument
6062 struct worker *worker; local
6182 struct worker *worker; local
6286 struct worker *worker = kthread_data(task); local
6331 struct worker *worker; local
6384 struct worker *worker; local
6445 struct worker *worker; local
7345 struct worker *worker; local
[all...]
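
workqueue.c implements the shared worker pools these entries index; the client-facing contract is much smaller. A minimal sketch of a user of the shared pool (illustrative names, assuming the standard <linux/workqueue.h> API):

    #include <linux/workqueue.h>

    static void demo_fn(struct work_struct *work)
    {
            pr_info("executed in process context by a shared pool worker\n");
    }

    static DECLARE_WORK(demo_work, demo_fn);

    static void demo_kick(void)
    {
            schedule_work(&demo_work);      /* queue on system_wq's per-CPU pool */
            flush_work(&demo_work);         /* wait until demo_fn() has returned */
    }
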
/linux-master/drivers/gpu/drm/xe/
xe_gt.c
533 INIT_WORK(&gt->reset.worker, gt_reset_worker);
696 struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
710 queue_work(gt->ordered_wq, &gt->reset.worker);
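
The xe_gt.c hits show the usual embed-and-container_of shape, with resets serialized on an ordered workqueue. A hedged sketch of that shape (struct layout and names are illustrative, not xe's):

    #include <linux/workqueue.h>

    struct demo_gt {
            struct workqueue_struct *ordered_wq;
            struct work_struct reset_worker;
    };

    static void demo_gt_reset_fn(struct work_struct *w)
    {
            struct demo_gt *gt = container_of(w, struct demo_gt, reset_worker);

            /* ... reset the hardware owned by gt ... */
    }

    static int demo_gt_init(struct demo_gt *gt)
    {
            gt->ordered_wq = alloc_ordered_workqueue("demo-gt-ordered", 0);
            if (!gt->ordered_wq)
                    return -ENOMEM;
            INIT_WORK(&gt->reset_worker, demo_gt_reset_fn);
            return 0;
    }

    static void demo_gt_reset_async(struct demo_gt *gt)
    {
            /* ordered wq: at most one reset runs at a time */
            queue_work(gt->ordered_wq, &gt->reset_worker);
    }
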
xe_gt_pagefault.c
351 queue_work(gt->usm.pf_wq, &pf_queue->worker);
364 struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker);
416 INIT_WORK(&gt->usm.pf_queue[i].worker, pf_queue_work_func);
421 INIT_WORK(&gt->usm.acc_queue[i].worker, acc_queue_work_func);
597 struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker);
653 queue_work(gt->usm.acc_wq, &acc_queue->worker);
xe_gt_types.h
149 * @reset.worker: work so GT resets can be done async, allowing to reset
152 struct work_struct worker; member in struct:xe_gt::__anon91
226 * moved by worker which processes faults (consumer).
236 /** @usm.pf_queue.worker: to process page faults */
237 struct work_struct worker; member in struct:xe_gt::__anon93::pf_queue
252 * moved by worker which processes counters
263 /** @usm.acc_queue.worker: to process access counters */
264 struct work_struct worker; member in struct:xe_gt::__anon93::acc_queue
xe_guc_relay_types.h
20 /** @worker: dispatches incoming action messages. */
21 struct work_struct worker; member in struct:xe_guc_relay
xe_guc_relay.c
343 INIT_WORK(&relay->worker, relays_worker_fn);
766 queue_work(relay_to_xe(relay)->sriov.wq, &relay->worker);
771 struct xe_guc_relay *relay = container_of(w, struct xe_guc_relay, worker);
/linux-master/drivers/vhost/
vhost.c
236 static void vhost_worker_queue(struct vhost_worker *worker, argument
244 llist_add(&work->node, &worker->work_list);
245 vhost_task_wake(worker->vtsk);
251 struct vhost_worker *worker; local
255 worker = rcu_dereference(vq->worker);
256 if (worker) {
258 vhost_worker_queue(worker, work);
279 * vhost_worker_flush - flush a worker
280 * @worker: worker to flush
285 vhost_worker_flush(struct vhost_worker *worker) argument
298 struct vhost_worker *worker; local
316 struct vhost_worker *worker; local
397 struct vhost_worker *worker = data; local
591 vhost_worker_destroy(struct vhost_dev *dev, struct vhost_worker *worker) argument
605 struct vhost_worker *worker; local
624 struct vhost_worker *worker; local
662 __vhost_vq_attach_worker(struct vhost_virtqueue *vq, struct vhost_worker *worker) argument
718 struct vhost_worker *worker; local
735 struct vhost_worker *worker; local
750 struct vhost_worker *worker; local
794 struct vhost_worker *worker; local
867 struct vhost_worker *worker; local
[all...]
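
The vhost hits revolve around a lockless producer/consumer queue: producers llist_add() a node and wake the worker task, and the worker drains the list with llist_del_all(). A generic sketch under those assumptions (vhost's vhost_task_wake() is replaced here by a plain wake_up_process()):

    #include <linux/llist.h>
    #include <linux/sched.h>

    struct demo_work {
            struct llist_node node;
            void (*fn)(struct demo_work *work);
    };

    struct demo_worker {
            struct llist_head work_list;
            struct task_struct *task;
    };

    /* producer side: lockless enqueue, then wake the worker thread */
    static void demo_worker_queue(struct demo_worker *worker,
                                  struct demo_work *work)
    {
            llist_add(&work->node, &worker->work_list);
            wake_up_process(worker->task);
    }

    /* consumer side, called from the worker thread's loop */
    static void demo_worker_drain(struct demo_worker *worker)
    {
            struct llist_node *node = llist_del_all(&worker->work_list);
            struct demo_work *work, *tmp;

            /* note: llist_del_all() hands back items newest-first */
            llist_for_each_entry_safe(work, tmp, node, node)
                    work->fn(work);
    }
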
/linux-master/drivers/media/platform/mediatek/vcodec/decoder/
mtk_vcodec_dec_drv.h
91 * @worker: worker to start a decode job
110 void (*worker)(struct work_struct *work); member in struct:mtk_vcodec_dec_pdata
154 * @decode_work: worker for the decoding
169 * @lock: protect variables accessed by V4L2 threads and worker thread such as
mtk_vcodec_dec_stateless.c
881 .worker = mtk_vdec_worker,
899 .worker = mtk_vdec_worker,
916 .worker = mtk_vdec_worker,
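
mtk_vcodec selects the decode worker per hardware variant through a platform-data callback. A compact sketch of that indirection (illustrative names):

    #include <linux/workqueue.h>

    struct demo_dec_pdata {
            /* per-variant decode-job handler, wired in at init time */
            void (*worker)(struct work_struct *work);
    };

    static void demo_vdec_worker(struct work_struct *work)
    {
            /* dequeue one job, run the decoder, mark the buffers done */
    }

    static const struct demo_dec_pdata demo_pdata_stateless = {
            .worker = demo_vdec_worker,
    };

    static void demo_dec_init(struct work_struct *decode_work,
                              const struct demo_dec_pdata *pdata)
    {
            INIT_WORK(decode_work, pdata->worker);
    }
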
/linux-master/drivers/gpu/drm/msm/
msm_kms.c
213 /* clean up event worker threads */
215 if (priv->event_thread[i].worker)
216 kthread_destroy_worker(priv->event_thread[i].worker);
272 ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id);
273 if (IS_ERR(ev_thread->worker)) {
274 ret = PTR_ERR(ev_thread->worker);
276 ev_thread->worker = NULL;
280 sched_set_fifo(ev_thread->worker->task);
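
msm_kms.c shows the kthread_worker lifecycle: create a dedicated worker thread per CRTC, raise it to FIFO priority, and NULL the pointer on failure so teardown can test it. A sketch of the same lifecycle (illustrative names):

    #include <linux/kthread.h>
    #include <linux/sched.h>

    struct demo_event_thread {
            struct kthread_worker *worker;
    };

    static int demo_event_thread_start(struct demo_event_thread *ev, int crtc_id)
    {
            ev->worker = kthread_create_worker(0, "crtc_event:%d", crtc_id);
            if (IS_ERR(ev->worker)) {
                    int ret = PTR_ERR(ev->worker);

                    ev->worker = NULL;      /* keep teardown's NULL check valid */
                    return ret;
            }
            sched_set_fifo(ev->worker->task); /* event delivery is latency-sensitive */
            return 0;
    }

    static void demo_event_thread_stop(struct demo_event_thread *ev)
    {
            if (ev->worker)
                    kthread_destroy_worker(ev->worker);
    }
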
msm_drv.h
96 struct kthread_worker *worker; member in struct:msm_drm_thread
507 * @worker: the kthread worker the work will be scheduled on
512 struct kthread_worker *worker; member in struct:msm_hrtimer_work
519 struct kthread_worker *worker,
msm_kms.h
128 struct kthread_worker *worker; member in struct:msm_pending_timer
msm_io_utils.c
120 kthread_queue_work(work->worker, &work->work);
133 struct kthread_worker *worker,
140 work->worker = worker;
132 msm_hrtimer_work_init(struct msm_hrtimer_work *work, struct kthread_worker *worker, kthread_work_func_t fn, clockid_t clock_id, enum hrtimer_mode mode) argument
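
msm_io_utils.c pairs an hrtimer with a kthread work item: the timer callback runs in hard-irq context and only punts to the worker. A sketch of that pairing, mirroring the signature above with illustrative names:

    #include <linux/hrtimer.h>
    #include <linux/kthread.h>

    struct demo_hrtimer_work {
            struct hrtimer timer;
            struct kthread_work work;
            struct kthread_worker *worker;
    };

    /* hard-irq context: defer the real work to the kthread worker */
    static enum hrtimer_restart demo_hrtimer_fn(struct hrtimer *t)
    {
            struct demo_hrtimer_work *w =
                    container_of(t, struct demo_hrtimer_work, timer);

            kthread_queue_work(w->worker, &w->work);
            return HRTIMER_NORESTART;
    }

    static void demo_hrtimer_work_init(struct demo_hrtimer_work *w,
                                       struct kthread_worker *worker,
                                       kthread_work_func_t fn,
                                       clockid_t clock_id,
                                       enum hrtimer_mode mode)
    {
            w->worker = worker;
            kthread_init_work(&w->work, fn);
            hrtimer_init(&w->timer, clock_id, mode);
            w->timer.function = demo_hrtimer_fn;
    }
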
/linux-master/drivers/gpu/drm/msm/adreno/
a6xx_gpu.c
2122 /* And the final one from recover worker */
2356 kthread_queue_work(gpu->worker, &gpu->recover_work);
a6xx_gmu.c
33 kthread_queue_work(gpu->worker, &gpu->recover_work);
/linux-master/drivers/gpu/drm/i915/gt/uc/
intel_guc_submission.c
1173 * busyness to the user. In order to do that, a worker runs periodically at
1192 * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
1380 * hold the reset mutex. The busyness worker also attempts to acquire the
1381 * reset mutex. Synchronously flushing a worker thread requires acquiring
1382 * the worker mutex. Lockdep sees this as a conflict. It thinks that the
1383 * flush can deadlock because it holds the worker mutex while waiting for
1385 * attempt to use other worker functions.
1387 * In practice, this scenario does not exist because the busyness worker
1394 * just not flush synchronously when a reset is in progress. Given that the worker
1400 * which do require a synchronous flush to make sure the worker i
[all...]
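
The comment above describes a lock-order conflict between a synchronous flush and the reset mutex. A hypothetical reduction of that shape (not the i915 code) showing why lockdep complains, and the usual escape of waiting before taking the lock:

    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    static DEFINE_MUTEX(demo_reset_mutex);

    static void demo_busyness_fn(struct work_struct *work)
    {
            mutex_lock(&demo_reset_mutex);  /* the worker takes the reset mutex */
            /* ... sample busyness counters ... */
            mutex_unlock(&demo_reset_mutex);
    }

    static DECLARE_WORK(demo_busyness_work, demo_busyness_fn);

    static void demo_reset(void)
    {
            /*
             * flush_work() under demo_reset_mutex would wait for
             * demo_busyness_fn(), which blocks on demo_reset_mutex:
             * exactly the inversion lockdep reports. Flushing before
             * taking the lock keeps the two orders disjoint.
             */
            flush_work(&demo_busyness_work);

            mutex_lock(&demo_reset_mutex);
            /* ... perform the reset ... */
            mutex_unlock(&demo_reset_mutex);
    }
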
/linux-master/drivers/net/wwan/t7xx/
t7xx_hif_cldma.c
246 queue_work(queue->worker, &queue->cldma_work);
353 queue_work(queue->worker, &queue->cldma_work);
578 queue_work(md_ctrl->txq[i].worker,
603 queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
1274 if (md_ctrl->txq[i].worker) {
1275 destroy_workqueue(md_ctrl->txq[i].worker);
1276 md_ctrl->txq[i].worker = NULL;
1281 if (md_ctrl->rxq[i].worker) {
1282 destroy_workqueue(md_ctrl->rxq[i].worker);
1283 md_ctrl->rxq[i].worker = NULL;
[all...]
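
t7xx gives each CLDMA queue its own workqueue and NULLs the pointer after destroy so the exit path is idempotent. A sketch of that per-queue lifecycle (names and flags are illustrative):

    #include <linux/workqueue.h>

    struct demo_queue {
            struct workqueue_struct *worker;        /* dedicated workqueue */
            struct work_struct work;
    };

    static int demo_queue_init(struct demo_queue *q, const char *name,
                               work_func_t fn)
    {
            q->worker = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, name);
            if (!q->worker)
                    return -ENOMEM;
            INIT_WORK(&q->work, fn);
            return 0;
    }

    static void demo_queue_exit(struct demo_queue *q)
    {
            if (q->worker) {
                    destroy_workqueue(q->worker);   /* drains queued work first */
                    q->worker = NULL;               /* make a second call harmless */
            }
    }
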
/linux-master/drivers/net/wireguard/
receive.c
571 &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
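
WireGuard queues handshake work on per-CPU work items addressed with per_cpu_ptr(). A hedged sketch of allocating and steering such per-CPU workers (illustrative names; WireGuard's own helpers differ):

    #include <linux/percpu.h>
    #include <linux/workqueue.h>

    struct demo_percpu_worker {
            void *ptr;
            struct work_struct work;
    };

    static struct demo_percpu_worker __percpu *
    demo_percpu_workers_alloc(work_func_t fn, void *ptr)
    {
            struct demo_percpu_worker __percpu *worker;
            int cpu;

            worker = alloc_percpu(struct demo_percpu_worker);
            if (!worker)
                    return NULL;
            for_each_possible_cpu(cpu) {
                    per_cpu_ptr(worker, cpu)->ptr = ptr;
                    INIT_WORK(&per_cpu_ptr(worker, cpu)->work, fn);
            }
            return worker;
    }

    /* later: steer one unit of work to a chosen CPU's instance */
    static void demo_percpu_kick(struct workqueue_struct *wq,
                                 struct demo_percpu_worker __percpu *worker,
                                 int cpu)
    {
            queue_work_on(cpu, wq, &per_cpu_ptr(worker, cpu)->work);
    }
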
/linux-master/drivers/s390/block/
dasd_eckd.c
93 struct work_struct worker; member in struct:ext_pool_exhaust_work_data
98 /* definitions for the path verification worker */
100 struct work_struct worker; member in struct:pe_handler_work_data
113 struct work_struct worker; member in struct:check_attention_work_data
1438 data = container_of(work, struct pe_handler_work_data, worker);
1479 INIT_WORK(&data->worker, do_pe_handler_work);
1484 schedule_work(&data->worker);
1700 data = container_of(work, struct ext_pool_exhaust_work_data, worker);
1725 INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
1736 schedule_work(&data->worker);
[all...]
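
dasd_eckd.c kicks its path-verification and pool-exhaustion handlers with heap-allocated work payloads, since the trigger can run in interrupt context. A sketch of the allocate-queue-free-in-handler pattern (illustrative names, error handling trimmed to the essentials):

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct demo_verify_work {
            struct work_struct worker;
            struct device *device;
    };

    static void demo_do_verify(struct work_struct *work)
    {
            struct demo_verify_work *data =
                    container_of(work, struct demo_verify_work, worker);

            /* ... verify paths for data->device ... */
            kfree(data);    /* the handler owns the allocation */
    }

    static int demo_schedule_verify(struct device *device)
    {
            struct demo_verify_work *data;

            /* GFP_ATOMIC: this may be triggered from an interrupt path */
            data = kmalloc(sizeof(*data), GFP_ATOMIC);
            if (!data)
                    return -ENOMEM;
            data->device = device;
            INIT_WORK(&data->worker, demo_do_verify);
            schedule_work(&data->worker);
            return 0;
    }
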
dasd_alias.c
146 INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
254 cancel_work_sync(&lcu->suc_data.worker);
904 worker);
956 "previous instance of summary unit check worker"
966 if (!schedule_work(&lcu->suc_data.worker))
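 
dasd_alias.c leans on two return-value details visible above: schedule_work() reports false if the item is still pending, and cancel_work_sync() waits out a running instance before teardown. A sketch (illustrative struct layout):

    #include <linux/workqueue.h>

    struct demo_suc_data {
            struct work_struct worker;
    };

    struct demo_lcu {
            struct demo_suc_data suc_data;
    };

    static void demo_trigger_suc(struct demo_lcu *lcu)
    {
            /* schedule_work() returns false if the item is already queued */
            if (!schedule_work(&lcu->suc_data.worker))
                    pr_warn("previous summary unit check worker still pending\n");
    }

    static void demo_release_lcu(struct demo_lcu *lcu)
    {
            /* wait for any running instance before freeing the lcu */
            cancel_work_sync(&lcu->suc_data.worker);
    }
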
/linux-master/drivers/vdpa/vdpa_sim/
vdpa_sim.c
72 kthread_queue_work(vdpasim->worker, work);
232 vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s",
234 if (IS_ERR(vdpasim->worker))
288 kthread_queue_work(vdpasim->worker, &vdpasim->work);
754 kthread_destroy_worker(vdpasim->worker);
/linux-master/drivers/platform/chrome/
cros_ec_spi.c
707 static void cros_ec_spi_high_pri_release(void *worker) argument
709 kthread_destroy_worker(worker);
722 dev_err(dev, "Can't create cros_ec high pri worker: %d\n", err);
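
cros_ec_spi.c ties the high-priority worker's destruction to the device via a devres action, so the kthread worker is torn down automatically on unbind. A sketch of that pattern (illustrative names):

    #include <linux/device.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static void demo_high_pri_release(void *worker)
    {
            kthread_destroy_worker(worker);
    }

    static int demo_high_pri_init(struct device *dev)
    {
            struct kthread_worker *worker;
            int err;

            worker = kthread_create_worker(0, "demo_high_pri_worker");
            if (IS_ERR(worker))
                    return PTR_ERR(worker);

            /* devres runs the release on unbind or on a later probe error */
            err = devm_add_action_or_reset(dev, demo_high_pri_release, worker);
            if (err)
                    return err;

            sched_set_fifo(worker->task);
            return 0;
    }
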
/linux-master/drivers/md/
dm-thin.c
253 struct work_struct worker; member in struct:pool
358 * Ensures the thin is not destroyed until the worker has finished
438 queue_work(pool->wq, &pool->worker);
2425 struct pool *pool = container_of(ws, struct pool, worker);
2472 struct work_struct worker; member in struct:pool_work
2478 return container_of(ws, struct pool_work, worker);
2489 INIT_WORK_ONSTACK(&pw->worker, fn);
2491 queue_work(pool->wq, &pw->worker);
3002 INIT_WORK(&pool->worker, do_worker);
/linux-master/drivers/gpu/drm/xe/tests/
xe_mocs.c
151 flush_work(&gt->reset.worker);

Completed in 296 milliseconds
