Lines Matching defs:work

71 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
402 static void svm_range_bo_wq_release(struct work_struct *work)
406 svm_bo = container_of(work, struct svm_range_bo, release_work);
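The release path at lines 402-406 is the standard workqueue idiom: embed a struct work_struct in the object and recover the containing object with container_of() inside the callback. A minimal sketch of that idiom, using hypothetical demo_bo names rather than the real svm_range_bo:

#include <linux/workqueue.h>
#include <linux/slab.h>

/* Hypothetical object embedding its release work, shaped like svm_range_bo. */
struct demo_bo {
        struct work_struct release_work;
        /* ... buffer object state ... */
};

/* Work callback: recover the embedding object from the work_struct pointer. */
static void demo_bo_wq_release(struct work_struct *work)
{
        struct demo_bo *bo = container_of(work, struct demo_bo, release_work);

        /* Teardown now runs in worker (process) context and may sleep. */
        kfree(bo);
}

/* Last-reference path: defer the free instead of tearing down inline. */
static void demo_bo_release_deferred(struct demo_bo *bo)
{
        INIT_WORK(&bo->release_work, demo_bo_wq_release);
        schedule_work(&bo->release_work);
}

Deferring the free to a worker lets the last reference be dropped from contexts that cannot sleep or that hold locks the teardown itself would need, which is the circular-locking concern noted at line 2347.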
1743 * svm_range_list_lock_and_flush_work - flush pending deferred work
1748 * Context: Returns with mmap write lock held, pending deferred work flushed
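Lines 1743 and 1748 document a helper that must return with the mmap write lock held and the deferred work already drained. A hedged sketch of that ordering, with hypothetical demo_* names; the retry loop covers work that gets re-queued in the window before the lock is taken:

#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/list.h>

/* Hypothetical container shaped like struct svm_range_list. */
struct demo_svm_list {
        struct work_struct deferred_list_work;
        struct list_head deferred_range_list;
};

/*
 * Flush the deferred worker, then take the mmap write lock. If more work
 * was queued before the lock was acquired, drop the lock and flush again,
 * so the caller always starts with an empty deferred list under the lock.
 */
static void demo_lock_and_flush_work(struct demo_svm_list *svms,
                                     struct mm_struct *mm)
{
retry_flush_work:
        flush_work(&svms->deferred_list_work);
        mmap_write_lock(mm);

        if (list_empty(&svms->deferred_range_list))
                return;

        mmap_write_unlock(mm);
        goto retry_flush_work;
}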
1766 static void svm_range_restore_work(struct work_struct *work)
1768 struct delayed_work *dwork = to_delayed_work(work);
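The restore path at lines 1766-1768 is a delayed work item: the callback still receives a plain work_struct pointer, so it steps back out with to_delayed_work() before applying container_of(). A minimal sketch with a hypothetical demo_restore_state:

#include <linux/workqueue.h>
#include <linux/printk.h>

/* Hypothetical per-process state embedding the delayed restore work. */
struct demo_restore_state {
        struct delayed_work restore_work;
};

static void demo_restore_work(struct work_struct *work)
{
        /* Convert the inner work_struct back to its delayed_work wrapper,
         * then to the embedding object.
         */
        struct delayed_work *dwork = to_delayed_work(work);
        struct demo_restore_state *st =
                container_of(dwork, struct demo_restore_state, restore_work);

        pr_debug("restore work running for %p\n", st);
        /* ... revalidate ranges, update GPU mappings, resume queues ... */
}

static void demo_restore_init(struct demo_restore_state *st)
{
        INIT_DELAYED_WORK(&st->restore_work, demo_restore_work);
}

Queuing it with schedule_delayed_work(&st->restore_work, msecs_to_jiffies(delay)) gives invalidations a chance to batch up before a single restore pass runs.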
1873 * If invalidation happens while restore work is running, restore work will
2243 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2285 static void svm_range_deferred_list_work(struct work_struct *work)
2291 svms = container_of(work, struct svm_range_list, deferred_list_work);
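The deferred-list worker at lines 2285-2291 exists to drain operations that other contexts queued because they could not take the required locks themselves. A sketch of that drain loop under a spinlock, with hypothetical demo_* names standing in for the prange/op machinery:

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_deferred_item {
        struct list_head list;
        int op;                         /* deferred operation to apply */
};

struct demo_range_list {
        struct work_struct deferred_list_work;
        struct list_head deferred_range_list;
        spinlock_t deferred_list_lock;  /* spin_lock_init()'d at setup */
};

/* Worker: pop one item at a time so the lock is never held across the handler. */
static void demo_deferred_list_work(struct work_struct *work)
{
        struct demo_range_list *svms =
                container_of(work, struct demo_range_list, deferred_list_work);
        struct demo_deferred_item *item;

        spin_lock(&svms->deferred_list_lock);
        while (!list_empty(&svms->deferred_range_list)) {
                item = list_first_entry(&svms->deferred_range_list,
                                        struct demo_deferred_item, list);
                list_del_init(&item->list);
                spin_unlock(&svms->deferred_list_lock);

                /* Handle item->op outside the lock; this may sleep or take
                 * the mmap lock, which is why it cannot run in the caller's
                 * context in the first place.
                 */
                kfree(item);

                spin_lock(&svms->deferred_list_lock);
        }
        spin_unlock(&svms->deferred_list_lock);
}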
2322 * deferred_list work is actually waiting for mmap lock.
2347 * last mm refcount, schedule release work to avoid circular locking
2364 pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2377 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2487 * work thread, and split prange if only part of prange is unmapped.
2813 * deferred list work will drain the stale fault before freeing the prange.
2930 /* p->lead_thread is available as kfd_process_wq_release flushes the work
2956 * Also flush pending deferred work to make sure the interval
3151 /* Ensure list work is finished before process is destroyed */
3382 * and restore work:
3384 * stops all queues, schedule restore work
3388 * 3. restore work update mappings of GPU, resume all queues.
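The three-step comment block at lines 3382-3388 (an invalidation stops the queues and schedules restore work; the restore work remaps and resumes the queues) can be illustrated with a small counter-based sketch. The atomic evicted-range counter and the 100 ms delay below are illustrative assumptions, not values taken from kfd_svm.c:

#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>

/* Hypothetical per-process state: evicted-range count plus the restore work. */
struct demo_proc {
        atomic_t evicted_ranges;
        struct delayed_work restore_work;       /* INIT_DELAYED_WORK'd at setup */
};

/* Hypothetical hooks standing in for the real queue-control calls. */
static void demo_stop_queues(struct demo_proc *p) { }
static void demo_resume_queues(struct demo_proc *p) { }
static bool demo_restore_mappings(struct demo_proc *p) { return true; }

/* Steps 1-2: the first eviction stops the queues and schedules the restore. */
static void demo_evict_range(struct demo_proc *p)
{
        if (atomic_inc_return(&p->evicted_ranges) == 1) {
                demo_stop_queues(p);
                schedule_delayed_work(&p->restore_work, msecs_to_jiffies(100));
        }
}

/* Step 3: restore work rebuilds the GPU mappings, then resumes the queues. */
static void demo_restore_worker(struct work_struct *work)
{
        struct demo_proc *p = container_of(to_delayed_work(work),
                                           struct demo_proc, restore_work);

        if (!demo_restore_mappings(p)) {
                /* An invalidation raced with the restore; try again later,
                 * as the comment at line 1873 describes.
                 */
                schedule_delayed_work(&p->restore_work, msecs_to_jiffies(100));
                return;
        }

        atomic_set(&p->evicted_ranges, 0);
        demo_resume_queues(p);
}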
3439 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3445 svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3567 * case because the rollback wouldn't be guaranteed to work either.
3645 /* Flush pending deferred work to avoid racing with deferred actions from
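Lines 3151 and 3645 both flush pending work before teardown so that no worker is left dereferencing state that is about to be freed. A short sketch of the usual shutdown ordering, again with hypothetical field names:

#include <linux/workqueue.h>

struct demo_list_state {
        struct work_struct deferred_list_work;
        struct delayed_work restore_work;
};

/*
 * Teardown ordering: flush_work() waits for a running instance of the
 * deferred-list worker to finish; cancel_delayed_work_sync() additionally
 * removes a restore that is queued but has not run yet.
 */
static void demo_list_fini(struct demo_list_state *state)
{
        flush_work(&state->deferred_list_work);
        cancel_delayed_work_sync(&state->restore_work);
}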