Lines Matching refs:active

24 static void __live_get(struct live_active *active)
26 kref_get(&active->ref);
29 static void __live_free(struct live_active *active)
31 i915_active_fini(&active->base);
32 kfree(active);
37 struct live_active *active = container_of(ref, typeof(*active), ref);
39 __live_free(active);
42 static void __live_put(struct live_active *active)
44 kref_put(&active->ref, __live_release);
49 struct live_active *active = container_of(base, typeof(*active), base);
51 __live_get(active);
57 struct live_active *active = container_of(base, typeof(*active), base);
59 active->retired = true;
60 __live_put(active);
65 struct live_active *active;
67 active = kzalloc(sizeof(*active), GFP_KERNEL);
68 if (!active)
71 kref_init(&active->ref);
72 i915_active_init(&active->base, __live_active, __live_retire, 0);
74 return active;
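
The matches above assemble into a small kref-wrapped i915_active fixture: __live_active() takes a wrapper reference when the i915_active first becomes busy, and __live_retire() marks the wrapper retired and drops that reference, so the final kref_put() tears everything down through i915_active_fini() and kfree(). Below is a sketch of that lifetime pattern, pieced together only from the matched calls; the struct layout is inferred from the container_of() uses, and the active callback's return value and the exact bracing are assumptions rather than quoted code.

struct live_active {
	struct i915_active base;	/* tracked object, from container_of(base, ...) */
	struct kref ref;		/* wrapper lifetime, from container_of(ref, ...) */
	bool retired;			/* set once __live_retire() has run */
};

static void __live_get(struct live_active *active)
{
	kref_get(&active->ref);
}

static void __live_free(struct live_active *active)
{
	i915_active_fini(&active->base);
	kfree(active);
}

static void __live_release(struct kref *ref)
{
	struct live_active *active = container_of(ref, typeof(*active), ref);

	__live_free(active);
}

static void __live_put(struct live_active *active)
{
	kref_put(&active->ref, __live_release);
}

/* Called when the i915_active first becomes busy: pin the wrapper. */
static int __live_active(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	__live_get(active);
	return 0;	/* return value assumed; not part of the matches */
}

/* Called when the last tracked request/barrier retires: unpin the wrapper. */
static void __live_retire(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	active->retired = true;
	__live_put(active);
}

__live_alloc(), shown almost completely in the matches, ties both halves together: kref_init() for the wrapper and i915_active_init() with the two callbacks above.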
82 struct live_active *active;
86 active = __live_alloc(i915);
87 if (!active)
92 kfree(active);
96 err = i915_active_acquire(&active->base);
113 err = i915_active_add_request(&active->base, rq);
116 pr_err("Failed to track active ref!\n");
123 i915_active_release(&active->base);
124 if (READ_ONCE(active->retired) && count) {
128 if (atomic_read(&active->base.count) != count) {
130 atomic_read(&active->base.count), count);
138 __live_put(active);
139 active = ERR_PTR(err);
142 return active;
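
__live_active_setup() then exercises those callbacks by tracking real requests. Reduced to just the i915_active calls visible in the matches, the per-request sequence looks roughly like the sketch below; the helper name track_one_request and the assumption that the caller already owns a submitted request are mine, not the selftest's.

static int track_one_request(struct live_active *active, struct i915_request *rq)
{
	int err;

	/* Hold the i915_active open while requests are being added to it. */
	err = i915_active_acquire(&active->base);
	if (err)
		return err;

	/* Register @rq; __live_active() pins the wrapper on first use. */
	err = i915_active_add_request(&active->base, rq);
	if (err)
		pr_err("Failed to track active ref!\n");

	/*
	 * Dropping the acquire allows retirement: once every tracked request
	 * has completed, __live_retire() runs and sets active->retired.
	 */
	i915_active_release(&active->base);

	return err;
}

The checks at the end of the matched setup function follow from this sequence: while tracked requests are still outstanding, READ_ONCE(active->retired) must remain false, and atomic_read(&active->base.count) must match the number of requests successfully added.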
148 struct live_active *active;
153 active = __live_active_setup(i915);
154 if (IS_ERR(active))
155 return PTR_ERR(active);
157 __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
158 if (!READ_ONCE(active->retired)) {
162 i915_active_print(&active->base, &p);
167 __live_put(active);
178 struct live_active *active;
183 active = __live_active_setup(i915);
184 if (IS_ERR(active))
185 return PTR_ERR(active);
191 if (!READ_ONCE(active->retired)) {
195 i915_active_print(&active->base, &p);
200 __live_put(active);
209 struct live_active *active;
214 active = __live_alloc(i915);
215 if (!active)
218 err = i915_active_acquire(&active->base);
223 err = i915_active_acquire_preallocate_barrier(&active->base,
228 i915_active_acquire_barrier(&active->base);
231 i915_active_release(&active->base);
235 __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
236 if (!READ_ONCE(active->retired)) {
242 __live_put(active);
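
live_active_barrier() exercises the idle-barrier path instead of real requests. A sketch of that sequence, again using only the calls present in the matches; the per-engine argument to the preallocate call (the matched line is truncated after the first argument), the helper name and the error codes are assumptions.

static int install_idle_barrier(struct live_active *active,
				struct intel_engine_cs *engine)
{
	int err;

	err = i915_active_acquire(&active->base);
	if (err)
		return err;

	/* Reserve barrier nodes for the engine... */
	err = i915_active_acquire_preallocate_barrier(&active->base, engine);
	if (!err) {
		/* ...and publish them so they retire when the engine idles. */
		i915_active_acquire_barrier(&active->base);
	}

	i915_active_release(&active->base);
	if (err)
		return err;

	/* Barriers retire on idle; wait for __live_retire() to be called. */
	__i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
	return READ_ONCE(active->retired) ? 0 : -EINVAL;
}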
281 drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
316 struct i915_active_fence *active)
320 fence = xchg(__active_fence_slot(active), NULL);
325 __list_del_entry(&active->cb.node);
337 /* Wait for all active callbacks */
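
The last few matches appear to come from the core i915_active code rather than the selftest: a drm_printf() dumping the active/retire callbacks, and a teardown step in which the fence pointer is stolen from its slot with xchg() before the signal-callback node is unlinked from that fence. A sketch of that detach step; the helper name, the locking details and the surrounding wait for in-flight callbacks (the final matched comment) are assumptions.

static struct dma_fence *detach_active_fence(struct i915_active_fence *active)
{
	struct dma_fence *fence;
	unsigned long flags;

	/* Atomically take ownership of the fence pointer out of the slot. */
	fence = xchg(__active_fence_slot(active), NULL);
	if (!fence)
		return NULL;

	/* Our callback node may still be on @fence's list; unlink it. */
	spin_lock_irqsave(fence->lock, flags);
	__list_del_entry(&active->cb.node);
	spin_unlock_irqrestore(fence->lock, flags);

	return fence;
}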