Lines Matching defs:work in drivers/gpu/drm/i915/i915_vma.c (line numbers are from the source file)
356 static void __vma_bind(struct dma_fence_work *work)
358 struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
363 * signaled the work to potentially clear/move the pages underneath. If
374 static void __vma_release(struct dma_fence_work *work)
376 struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
388 .work = __vma_bind,
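
These first matches (356-388) show the dma_fence_work pattern the rest of the file builds on: a driver-private work item embeds a struct dma_fence_work as its base member, the .work and .release callbacks recover the outer struct with container_of(), and a static ops table ties them together. A minimal sketch of that shape, assuming the i915-internal helpers from i915_sw_fence_work.h; my_bind_work and its payload field are hypothetical stand-ins for the real i915_vma_work fields:

        #include <linux/slab.h>
        #include "i915_sw_fence_work.h"

        struct my_bind_work {
                struct dma_fence_work base;     /* embedded, never a pointer */
                void *payload;                  /* state consumed by the worker */
        };

        static void my_bind(struct dma_fence_work *work)
        {
                /* Recover the outer struct from the embedded base. */
                struct my_bind_work *vw = container_of(work, typeof(*vw), base);

                /* ... perform the deferred bind using vw->payload ... */
        }

        static void my_release(struct dma_fence_work *work)
        {
                struct my_bind_work *vw = container_of(work, typeof(*vw), base);

                /* Drop references taken when the work was queued. */
                kfree(vw->payload);
        }

        static const struct dma_fence_work_ops my_bind_ops = {
                .name = "bind",
                .work = my_bind,
                .release = my_release,
        };

The .work callback runs once every fence the work was chained behind has signalled; .release runs when the last reference to the work's fence is dropped, which is why the object reference taken at queue time (line 565) is put in __vma_release (line 376).
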
465 * @work: preallocated worker for allocating and binding the PTE
476 struct i915_vma_work *work,
514 if (work && bind_flags & vma->vm->bind_async_flags)
516 &work->base.chain,
540 if (work && bind_flags & vma->vm->bind_async_flags) {
543 work->vma_res = i915_vma_resource_get(vma->resource);
544 work->pat_index = pat_index;
545 work->flags = bind_flags;
556 prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
558 __i915_sw_fence_await_dma_fence(&work->base.chain,
560 &work->cb);
564 work->base.dma.error = 0; /* enable the queue_work() */
565 work->obj = i915_gem_object_get(vma->obj);
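
Lines 465-565 are from i915_vma_bind() itself. When the VM requires an asynchronous bind, the PTE parameters are captured in the preallocated work item, the work's fence is installed as the vma's exclusive fence, and the work is ordered after whatever fence it displaced. A condensed sketch of that branch, pieced together from the matched lines, with error handling and the synchronous else-path elided:

        if (work && bind_flags & vma->vm->bind_async_flags) {
                struct dma_fence *prev;

                /* Capture everything the worker needs; it must not
                 * touch the vma itself once queued. */
                work->vma_res = i915_vma_resource_get(vma->resource);
                work->pat_index = pat_index;
                work->flags = bind_flags;

                /* Install our fence as the vma's single exclusive fence
                 * and wait on whichever fence it replaced. */
                prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
                if (prev) {
                        __i915_sw_fence_await_dma_fence(&work->base.chain,
                                                        prev, &work->cb);
                        dma_fence_put(prev);
                }

                work->base.dma.error = 0; /* enable the queue_work() */
                work->obj = i915_gem_object_get(vma->obj);
        }

The work item is created disarmed (i915_vma_work() presets a nonzero dma.error), so if the bind is aborted before this point, committing the work skips __vma_bind and only runs the release callback; clearing the error at line 564 is what arms the worker.
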
819 * node.start + guard, the easiest way to make that work is
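
Line 819 is from the comment explaining the guard padding in the insertion path: the hardware effectively starts the mapping at node.start + guard, so the guard is rounded up to the requested alignment to keep that effective start aligned. A tiny illustration of the arithmetic (values and the helper name are made up, not from the source):

        #include <linux/align.h>
        #include <linux/sizes.h>

        /* An aligned start plus a multiple of the alignment is still
         * aligned, so aligning the guard preserves the caller's
         * alignment request for node.start + guard. */
        static u64 effective_start(u64 node_start, u64 guard, u64 alignment)
        {
                guard = ALIGN(guard, alignment);  /* e.g. SZ_4K -> SZ_64K */
                return node_start + guard;        /* still aligned */
        }
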
1436 struct i915_vma_work *work = NULL;
1474 work = i915_vma_work();
1475 if (!work) {
1480 work->vm = vma->vm;
1486 dma_fence_work_chain(&work->base, moving);
1491 &work->stash,
1496 err = i915_vm_map_pt_stash(vma->vm, &work->stash);
1570 flags, work, vma_res);
1599 if (work)
1600 dma_fence_work_commit_imm(&work->base);
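
Lines 1436-1600 are the caller side in the pin path that drives all of the above: the work item is preallocated before any fences are published, chained behind any in-flight move of the backing store, given a pre-mapped page-table stash so the worker never allocates, and finally committed. A condensed sketch of that sequence, assuming ww locking, runtime pm and the full unwind labels are handled by the caller; bind_sketch is a hypothetical wrapper and "moving" stands for the object's outstanding move fence, if any:

        static int bind_sketch(struct i915_vma *vma, unsigned int pat_index,
                               u32 flags, struct dma_fence *moving,
                               struct i915_vma_resource *vma_res)
        {
                struct i915_vma_work *work = NULL;
                int err = 0;

                if (flags & vma->vm->bind_async_flags || moving) {
                        work = i915_vma_work();
                        if (!work)
                                return -ENOMEM;

                        work->vm = vma->vm;

                        /* Order the bind after the pending move of the pages. */
                        if (moving)
                                dma_fence_work_chain(&work->base, moving);

                        /* Allocate and map the page-table memory up front:
                         * __vma_bind() runs in the fence-signalling path
                         * and must not allocate. */
                        err = i915_vm_alloc_pt_stash(vma->vm, &work->stash,
                                                     vma->size);
                        if (!err)
                                err = i915_vm_map_pt_stash(vma->vm, &work->stash);
                        if (err)
                                goto out_commit;
                }

                err = i915_vma_bind(vma, pat_index, flags, work, vma_res);

        out_commit:
                /* Queue the work; if it has no outstanding dependencies
                 * it is executed immediately on this thread instead. */
                if (work)
                        dma_fence_work_commit_imm(&work->base);
                return err;
        }

Note that the commit runs even on the error path, matching line 1600: an unarmed work item still needs its release callback to drop the stash and any references it holds.
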
1705 * client, avoiding the work required to rebind the VMA. This is
1711 * of wasted work for the steady state.
2239 * async unbind work if needed, but we can't because it uses