Lines matching refs: array (drivers/dma-buf/dma-fence-array.c)

3  * dma-fence-array: aggregate fences to be waited together
14 #include <linux/dma-fence-array.h>
28 static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
36 cmpxchg(&array->base.error, PENDING_ERROR, error);
39 static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
42 cmpxchg(&array->base.error, PENDING_ERROR, 0);
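These two helpers implement a "first error wins" policy: base.error starts out holding the PENDING_ERROR placeholder, the cmpxchg in the setter only replaces that placeholder (so the error of the first member fence to fail sticks), and the clearer turns a never-set placeholder back into 0 before the array reports completion. On the consumer side the propagated error shows up through the usual status helpers; a minimal sketch (demo_wait_for_array() and the 1 s timeout are illustrative, not from this file):

#include <linux/dma-fence.h>
#include <linux/jiffies.h>

/* Wait for the aggregate fence, then surface whatever error one of
 * its member fences signaled with (propagated into base.error). */
static int demo_wait_for_array(struct dma_fence *fence)
{
        long ret;
        int status;

        ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(1000));
        if (ret == 0)
                return -ETIMEDOUT;
        if (ret < 0)
                return ret;     /* e.g. -ERESTARTSYS */

        status = dma_fence_get_status(fence);
        return status < 0 ? status : 0;
}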
47 struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
49 dma_fence_array_clear_pending_error(array);
51 dma_fence_signal(&array->base);
52 dma_fence_put(&array->base);
60 struct dma_fence_array *array = array_cb->array;
62 dma_fence_array_set_pending_error(array, f->error);
64 if (atomic_dec_and_test(&array->num_pending))
65 irq_work_queue(&array->work);
67 dma_fence_put(&array->base);
72 struct dma_fence_array *array = to_dma_fence_array(fence);
73 struct dma_fence_array_cb *cb = (void *)(&array[1]);
76 for (i = 0; i < array->num_fences; ++i) {
77 cb[i].array = array;
81 * reference count on the array so that we do not free it too
83 * until we signal the array as complete (but that is now
86 dma_fence_get(&array->base);
87 if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
89 int error = array->fences[i]->error;
91 dma_fence_array_set_pending_error(array, error);
92 dma_fence_put(&array->base);
93 if (atomic_dec_and_test(&array->num_pending)) {
94 dma_fence_array_clear_pending_error(array);
105 struct dma_fence_array *array = to_dma_fence_array(fence);
107 if (atomic_read(&array->num_pending) > 0)
110 dma_fence_array_clear_pending_error(array);
116 struct dma_fence_array *array = to_dma_fence_array(fence);
119 for (i = 0; i < array->num_fences; ++i)
120 dma_fence_put(array->fences[i]);
122 kfree(array->fences);
129 struct dma_fence_array *array = to_dma_fence_array(fence);
132 for (i = 0; i < array->num_fences; ++i)
133 dma_fence_set_deadline(array->fences[i], deadline);
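The set_deadline op above simply fans a requested deadline out to every member fence. From the caller's side nothing array-specific is needed; a sketch, where the ~16 ms budget and the helper name are just illustrative:

#include <linux/dma-fence.h>
#include <linux/ktime.h>

/* Hint that the producers behind @fence should finish within ~16 ms;
 * for a dma_fence_array this reaches every member fence. */
static void demo_hint_frame_deadline(struct dma_fence *fence)
{
        dma_fence_set_deadline(fence, ktime_add_ms(ktime_get(), 16));
}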
147 * dma_fence_array_create - Create a custom fence array
148 * @num_fences: [in] number of fences to add in the array
149 * @fences: [in] array containing the fences
152 * @signal_on_any: [in] signal on any fence in the array
158 * The caller should allocate the fences array with num_fences size
160 * array is taken and dma_fence_put() is used on each fence on release.
162 * If @signal_on_any is true the fence array signals if any fence in the array
163 * signals, otherwise it signals when all fences in the array signal.
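Taken together, the kernel-doc above spells out the calling convention: the caller allocates the fences[] array, hands in one reference per fence, and ownership of both the references and the array passes to the new object. A caller sketch under those rules (demo_merge_two() is an illustrative name; f1 and f2 are assumed to be fences the caller already holds references to):

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/slab.h>

static struct dma_fence *demo_merge_two(struct dma_fence *f1,
                                        struct dma_fence *f2)
{
        struct dma_fence **fences;
        struct dma_fence_array *array;

        fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
        if (!fences)
                return NULL;

        /* The array takes ownership, so hand over dedicated references. */
        fences[0] = dma_fence_get(f1);
        fences[1] = dma_fence_get(f2);

        /* false: signal only once both fences have signaled. */
        array = dma_fence_array_create(2, fences,
                                       dma_fence_context_alloc(1), 1,
                                       false);
        if (!array) {
                dma_fence_put(fences[0]);
                dma_fence_put(fences[1]);
                kfree(fences);
                return NULL;
        }

        return &array->base;
}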
170 struct dma_fence_array *array;
171 size_t size = sizeof(*array);
175 /* Allocate the callback structures behind the array. */
177 array = kzalloc(size, GFP_KERNEL);
178 if (!array)
181 spin_lock_init(&array->lock);
182 dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
184 init_irq_work(&array->work, irq_dma_fence_array_work);
186 array->num_fences = num_fences;
187 atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
188 array->fences = fences;
190 array->base.error = PENDING_ERROR;
197 * The correct way of handling this is to flatten out the array by the
206 return array;
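The comment at source line 197 refers to the rule that a dma_fence_array must not contain other fence containers; the caller is expected to flatten nested fences first. In recent kernels the dma-fence-unwrap helpers exist for exactly that; a sketch, assuming the two inputs may themselves be containers:

#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>

/* Combine two possibly-nested fences into one flat fence.
 * Returns a new reference, or NULL on allocation failure. */
static struct dma_fence *demo_merge_flat(struct dma_fence *f1,
                                         struct dma_fence *f2)
{
        return dma_fence_unwrap_merge(f1, f2);
}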
212 * @fence: [in] fence or fence array
215 * Checks the provided fence or, for a fence array, all fences in the array
221 struct dma_fence_array *array = to_dma_fence_array(fence);
227 for (i = 0; i < array->num_fences; i++) {
228 if (array->fences[i]->context != context)
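dma_fence_match_context() accepts either a plain fence or a fence array and checks every member against the given context. A caller-side sketch, where ctx is assumed to be a context the caller obtained earlier from dma_fence_context_alloc():

#include <linux/errno.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>

/* Only accept work that was produced on our own timeline. */
static int demo_check_origin(struct dma_fence *fence, u64 ctx)
{
        if (!dma_fence_match_context(fence, ctx))
                return -EINVAL;

        return 0;
}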
238 struct dma_fence_array *array;
243 array = to_dma_fence_array(head);
244 if (!array)
247 if (!array->num_fences)
250 return array->fences[0];
257 struct dma_fence_array *array = to_dma_fence_array(head);
259 if (!array || index >= array->num_fences)
262 return array->fences[index];
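dma_fence_array_first() and dma_fence_array_next() are the backing for the dma_fence_array_for_each() iterator in <linux/dma-fence-array.h>; a plain, non-array fence is handled as well and is visited exactly once. A sketch of walking the members (demo_all_signaled() is an illustrative name):

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>

/* Check every fence behind @head, whether it is an array or not. */
static bool demo_all_signaled(struct dma_fence *head)
{
        struct dma_fence *fence;
        unsigned int index;

        dma_fence_array_for_each(fence, index, head) {
                if (!dma_fence_is_signaled(fence))
                        return false;
        }

        return true;
}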