Lines Matching refs:fence

50 				       struct dma_fence *fence)
55 * fence's parent set before test_bit()
57 smp_store_release(&s_fence->parent, dma_fence_get(fence));
60 dma_fence_set_deadline(fence, s_fence->deadline);
63 void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
66 /* Set the parent before signaling the scheduled fence, such that,
73 drm_sched_fence_set_parent(fence, parent);
75 dma_fence_signal(&fence->scheduled);
78 void drm_sched_fence_finished(struct drm_sched_fence *fence, int result)
81 dma_fence_set_error(&fence->finished, result);
82 dma_fence_signal(&fence->finished);
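The matches from lines 63-82 are the two signalling entry points: drm_sched_fence_scheduled() publishes the hardware fence as parent and then signals the scheduled fence, while drm_sched_fence_finished() records an optional error before signalling the finished fence. A hedged sketch of how a scheduler run path might drive the pair; run_one_job() and the surrounding flow are illustrative stand-ins, not the actual drm_sched_main() code:

	struct drm_sched_fence *s_fence = job->s_fence;
	struct dma_fence *hw_fence;

	hw_fence = run_one_job(job);		/* hypothetical backend hook */

	/* Parent is stored before ->scheduled signals, so waiters see it. */
	drm_sched_fence_scheduled(s_fence, hw_fence);

	if (IS_ERR_OR_NULL(hw_fence))
		/* No hardware fence: complete right away, forwarding any error. */
		drm_sched_fence_finished(s_fence, PTR_ERR_OR_ZERO(hw_fence));
	else
		/* Normally ->finished is signalled later, from the hw_fence callback. */
		dma_fence_put(hw_fence);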
85 static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
92 struct drm_sched_fence *fence = to_drm_sched_fence(f);
93 return (const char *)fence->sched->name;
99 struct drm_sched_fence *fence = to_drm_sched_fence(f);
101 if (!WARN_ON_ONCE(!fence))
102 kmem_cache_free(sched_fence_slab, fence);
106 * drm_sched_fence_free - free up an uninitialized fence
108 * @fence: fence to free
110 * Free up the fence memory. Should only be used if drm_sched_fence_init()
113 void drm_sched_fence_free(struct drm_sched_fence *fence)
115 /* This function should not be called if the fence has been initialized. */
116 if (!WARN_ON_ONCE(fence->sched))
117 kmem_cache_free(sched_fence_slab, fence);
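drm_sched_fence_free() (lines 106-117) is the unwind path for a fence that was allocated but never initialised: ->sched is only set by drm_sched_fence_init(), so the WARN_ON_ONCE(fence->sched) check refuses to free a fence that is already live. A minimal sketch of the intended pairing, with setup_step() as a hypothetical stand-in for whatever can fail between allocation and init:

	struct drm_sched_fence *f = drm_sched_fence_alloc(entity, owner);

	if (!f)
		return -ENOMEM;

	if (setup_step(f)) {
		/* drm_sched_fence_init() never ran and ->sched is still NULL,
		 * so handing the object back to sched_fence_slab is safe. */
		drm_sched_fence_free(f);
		return -EINVAL;
	}

	drm_sched_fence_init(f, entity);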
121 * drm_sched_fence_release_scheduled - callback that fence can be freed
123 * @f: fence
126 * It just RCU schedules freeing up the fence.
130 struct drm_sched_fence *fence = to_drm_sched_fence(f);
132 dma_fence_put(fence->parent);
133 call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
139 * @f: fence
141 * Drop the extra reference from the scheduled fence to the base fence.
145 struct drm_sched_fence *fence = to_drm_sched_fence(f);
147 dma_fence_put(&fence->scheduled);
153 struct drm_sched_fence *fence = to_drm_sched_fence(f);
157 spin_lock_irqsave(&fence->lock, flags);
161 ktime_before(fence->deadline, deadline)) {
162 spin_unlock_irqrestore(&fence->lock, flags);
166 fence->deadline = deadline;
169 spin_unlock_irqrestore(&fence->lock, flags);
176 parent = smp_load_acquire(&fence->parent);
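The store at line 57 and the load at line 176 are the two halves of one release/acquire pairing: drm_sched_fence_set_parent() publishes ->parent before testing the deadline bit, and drm_sched_fence_set_deadline_finished() sets the bit and deadline before reading ->parent. A condensed view of both sides, reconstructed from the surrounding file with the locking trimmed:

	/* publisher: drm_sched_fence_set_parent() */
	smp_store_release(&s_fence->parent, dma_fence_get(fence));
	if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
		     &s_fence->finished.flags))
		dma_fence_set_deadline(fence, s_fence->deadline);

	/* consumer: drm_sched_fence_set_deadline_finished() */
	fence->deadline = deadline;
	set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);
	parent = smp_load_acquire(&fence->parent);
	if (parent)
		dma_fence_set_deadline(parent, deadline);

Whichever side runs second observes the other's write, so at least one of the two paths forwards the deadline to the hardware fence regardless of how the race resolves.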
209 struct drm_sched_fence *fence = NULL;
211 fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
212 if (fence == NULL)
215 fence->owner = owner;
216 spin_lock_init(&fence->lock);
218 return fence;
221 void drm_sched_fence_init(struct drm_sched_fence *fence,
226 fence->sched = entity->rq->sched;
228 dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
229 &fence->lock, entity->fence_context, seq);
230 dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
231 &fence->lock, entity->fence_context + 1, seq);
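drm_sched_fence_init() (lines 221-231) finishes what drm_sched_fence_alloc() started: it binds the fence to the entity's current scheduler and initialises the two embedded dma_fences on consecutive contexts, so the scheduled and finished fences each get their own timeline while sharing one sequence number per job. A hedged sketch of what a waiter might do with either half; the choice of wait and the job variable are illustrative only:

	struct drm_sched_fence *s_fence = job->s_fence;

	/* Block until the scheduler has handed the job to the hardware ... */
	dma_fence_wait(&s_fence->scheduled, true);

	/* ... or until the hardware has actually finished executing it. */
	dma_fence_wait(&s_fence->finished, true);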