Searched refs:list (Results 251 - 275 of 5598) sorted by last modified time

/linux-master/drivers/media/pci/cobalt/
cobalt-v4l2.c
112 list_for_each_entry(cb, &s->bufs, list) {
134 list_add_tail(&cb->list, &s->bufs);
272 cb = list_first_entry(&s->bufs, struct cobalt_buffer, list);
367 list_for_each_entry(cb, &s->bufs, list) {
401 cb = list_entry(p, struct cobalt_buffer, list);
402 list_del(&cb->list);
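
The cobalt-v4l2.c hits above are the textbook <linux/list.h> queue pattern: a struct list_head embedded in each buffer, list_add_tail() to enqueue, list_for_each_entry() to walk, and list_first_entry()/list_del() to dequeue. A minimal sketch of that pattern follows; the demo_* names are hypothetical, not taken from the driver.

    #include <linux/list.h>

    /* Hypothetical buffer type with an embedded list node. */
    struct demo_buffer {
            int id;
            struct list_head list;          /* links the buffer into the queue */
    };

    static LIST_HEAD(demo_bufs);            /* queue head, statically initialized */

    static void demo_queue(struct demo_buffer *buf)
    {
            list_add_tail(&buf->list, &demo_bufs);  /* append at the tail */
    }

    static struct demo_buffer *demo_dequeue(void)
    {
            struct demo_buffer *buf;

            if (list_empty(&demo_bufs))
                    return NULL;
            buf = list_first_entry(&demo_bufs, struct demo_buffer, list);
            list_del(&buf->list);                   /* unlink the oldest buffer */
            return buf;
    }
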
/linux-master/drivers/media/mc/
mc-entity.c
12 #include <linux/list.h>
155 list_add_tail(&gobj->list, &mdev->entities);
158 list_add_tail(&gobj->list, &mdev->pads);
161 list_add_tail(&gobj->list, &mdev->links);
164 list_add_tail(&gobj->list, &mdev->interfaces);
183 /* Remove the object from mdev list */
184 list_del(&gobj->list);
365 link = list_entry(link_top(graph), typeof(*link), list);
432 * traversal, with a list of links to be visited to continue the traversal.
581 list_for_each_entry(ppad, &pipe->pads, list) {
[all...]
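
The list_entry() call in the mc-entity.c hit at line 365 is worth a note: list_entry(ptr, type, member) is simply container_of(), turning a pointer to the embedded struct list_head back into a pointer to the enclosing object. A hedged illustration with a made-up type:

    #include <linux/list.h>

    struct demo_obj {
            unsigned long id;
            struct list_head list;
    };

    /* Recover the enclosing object from a raw list node;
     * list_entry() is just container_of() under the hood. */
    static struct demo_obj *demo_from_node(struct list_head *node)
    {
            return list_entry(node, struct demo_obj, list);
    }
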
/linux-master/drivers/media/i2c/et8ek8/
et8ek8_driver.c
259 * Write a list of registers to i2c device.
261 * The list of registers is terminated by ET8EK8_REG_TERM.
276 /* Initialize list pointers to the start of the list */
281 * We have to go through the list to figure out how
314 * the list, this is where we snooze for the required time
320 * Update list pointers and cnt and start over ...
406 struct et8ek8_reglist **list = et8ek8_reglist_first(meta); local
423 for (; *list; list
462 struct et8ek8_reglist **list = et8ek8_reglist_first(meta); local
529 struct et8ek8_reglist *list; local
888 struct et8ek8_reglist **list = local
926 struct et8ek8_reglist **list = local
964 struct et8ek8_reglist **list = local
[all...]
/linux-master/drivers/media/common/videobuf2/
videobuf2-v4l2.c
1323 * to protect the objects list.
1333 list_for_each_entry(obj, &req->objects, list) {
1343 list_for_each_entry_continue_reverse(obj, &req->objects, list)
1358 * objects list, after all other object types. Once buffer objects
1364 list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
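
The list_for_each_entry_safe() hit at line 1364 is the variant to reach for when entries are removed while the list is being walked: the extra cursor caches the next node before the current one is unlinked. A minimal sketch with hypothetical names:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_obj {
            struct list_head list;
    };

    /* Unlink and free every entry; the second cursor keeps the walk
     * valid even though the current node is deleted each pass. */
    static void demo_free_all(struct list_head *head)
    {
            struct demo_obj *obj, *tmp;

            list_for_each_entry_safe(obj, tmp, head, list) {
                    list_del(&obj->list);
                    kfree(obj);
            }
    }
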
videobuf2-core.c
1199 /* Add the buffer to the done buffers list */
1658 list_for_each_entry(obj, &req->objects, list)
1898 * Add to the queued buffers list, a buffer will stay on it until
1963 * long as we hold the driver's lock, the list will remain not
2208 * Remove all buffers from vb2's list...
2212 * ...and done list; userspace will not receive any buffers it
/linux-master/drivers/media/cec/core/
cec-api.c
252 struct cec_msg_entry, list);
254 list_del(&entry->list);
331 struct cec_event_entry, list);
344 list_del(&ev->list);
620 list_add(&fh->list, &devnode->fhs);
652 list_del(&fh->list);
671 list_first_entry(&fh->msgs, struct cec_msg_entry, list);
673 list_del(&entry->list);
680 struct cec_event_entry, list);
682 list_del(&entry->list);
[all...]
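
The cec-api.c hits show the usual queue-drain idiom: take the oldest entry with list_first_entry() and unlink it with list_del(). Pops like this normally happen under whatever lock protects the queue; the sketch below uses a hypothetical mutex and message type rather than the CEC structures.

    #include <linux/list.h>
    #include <linux/mutex.h>

    struct demo_msg {
            struct list_head list;
    };

    static LIST_HEAD(demo_msgs);
    static DEFINE_MUTEX(demo_lock);         /* protects demo_msgs */

    /* Pop the oldest queued message, or NULL if the queue is empty. */
    static struct demo_msg *demo_pop(void)
    {
            struct demo_msg *msg = NULL;

            mutex_lock(&demo_lock);
            if (!list_empty(&demo_msgs)) {
                    msg = list_first_entry(&demo_msgs, struct demo_msg, list);
                    list_del(&msg->list);
            }
            mutex_unlock(&demo_lock);
            return msg;
    }
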
cec-core.c
185 list_for_each_entry(fh, &devnode->fhs, list)
cec-adap.c
110 list_add_tail(&entry->list, &fh->events[ev_idx]);
117 list_add_tail(&entry->list, &fh->events[ev_idx]);
120 struct cec_event_entry, list);
121 list_del(&entry->list);
127 struct cec_event_entry, list);
144 list_for_each_entry(fh, &adap->devnode.fhs, list)
161 list_for_each_entry(fh, &adap->devnode.fhs, list) {
179 list_for_each_entry(fh, &adap->devnode.fhs, list)
195 list_for_each_entry(fh, &adap->devnode.fhs, list)
224 list_add_tail(&entry->list,
[all...]
/linux-master/drivers/hwtracing/coresight/
coresight-cti-core.c
16 #include <linux/list.h>
27 * hardware. We have a list of all CTIs irrespective of CPU bound or
39 /* protect the list */
48 /* quick lookup list for CPU bound CTIs when power handling */
56 * CTI device name list - for CTI not bound to cores.
246 * Add a connection entry to the list of connections for this
513 * Look for a matching connection device name in the list of connections.
544 * Search the cti list to add an associated CTI into the supplied CS device
554 /* protect the list */
569 /* for each CTI in list
[all...]
/linux-master/drivers/gpu/drm/xe/
xe_vm.c
97 static void free_preempt_fences(struct list_head *list) argument
101 list_for_each_safe(link, next, list)
105 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list, argument
120 list_move_tail(xe_preempt_fence_link(pfence), list);
159 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list) argument
167 link = list->next;
168 xe_assert(vm->xe, link != list);
417 } while (!list_empty(&vm->gpuvm.evict.list));
664 /* Pin and move to temporary list */
2039 * Create operations list fro
[all...]
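
The xe_vm.c hits mix the raw list_for_each_safe() walk, which hands back bare struct list_head pointers, with list_move_tail(), which unlinks a node and requeues it on another list in one step. A short sketch of that combination, with made-up names:

    #include <linux/list.h>

    /* Move every node from 'src' to the tail of 'dst' one at a time.
     * list_splice_tail_init() would do the same in bulk; the loop
     * just mirrors the per-node pattern seen in the hits above. */
    static void demo_drain_to(struct list_head *src, struct list_head *dst)
    {
            struct list_head *node, *next;

            list_for_each_safe(node, next, src)
                    list_move_tail(node, dst);
    }
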
xe_ttm_vram_mgr.c
19 xe_ttm_vram_mgr_first_block(struct list_head *list) argument
21 return list_first_entry_or_null(list, struct drm_buddy_block, link);
xe_pt.c
182 * @deferred: List head of lockless list for deferred putting. NULL for
195 XE_WARN_ON(!list_empty(&pt->bo->ttm.base.gpuva.list));
xe_pm.c
221 INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);
356 * Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify
357 * also checks and delets bo entry from user fault list.
361 &xe->mem_access.vram_userfault.list, vram_userfault_link)
xe_guc_submit.c
996 list_for_each_entry(tmp_job, &sched->base.pending_list, drm.list)
1837 list_for_each_entry(job_iter, &sched->base.pending_list, drm.list) {
xe_device_types.h
357 /** @pinned.lock: protected pinned BO list state */
394 * @vram_usefault.list Using mutex instead of spinlock
395 * as lock is applied to entire list operation which
401 * @mem_access.vram_userfault.list: Keep list of userfaulted
405 struct list_head list; member in struct:xe_device::__anon833::__anon834
xe_bo.c
252 * rebind of the sg list upon subsequent validation to XE_PL_TT.
462 if (!list_empty(&bo->ttm.base.gpuva.list)) {
1046 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
1135 list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list);
2234 * @deferred: The lockless list used for the call to xe_bo_put_deferred().
2237 * The @deferred list can be either an onstack local list or a global
2238 * shared list used by a workqueue.
/linux-master/drivers/gpu/drm/xe/display/
intel_fb_bo.c
43 if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) {
/linux-master/drivers/gpu/drm/vmwgfx/
vmwgfx_validation.h
31 #include <linux/list.h>
vmwgfx_drv.h
145 * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
146 * @binding_head: List head for the context binding list. Protected by
283 * validation list.
322 * struct vmw_piter - Page iterator that iterates over a list of pages
323 * and DMA addresses that could be either a scatter-gather list or
328 * @iter: Scatter-gather page iterator. Current position in SG list.
331 * @next: Function to advance the iterator. Returns false if past the list
593 * Surface swapping. The "surface_lru" list is protected by the
940 * Returns false if past the list of pages, true otherwise.
1198 struct list_head *list);
[all...]
ttm_object.c
50 #include <linux/list.h>
66 * @lock: Lock that protects the ref_list list and the
105 * @head: List entry for the per-file list of ref-objects.
383 struct list_head *list; local
395 list = tfile->ref_list.next;
396 ref = list_entry(list, struct ttm_ref_object, head);
/linux-master/drivers/gpu/drm/vc4/
vc4_drv.h
101 /* Array of list heads for entries in the BO cache,
132 struct list_head list; member in struct:vc4_dev::__anon797
153 * the binner. The first job in the list is the one currently
160 * job in the list is the one currently programmed into ct1ca
276 * whether we can move the BO to the purgeable list or not (when the BO
324 * list. Units are dwords.
385 /* System memory copy of the display list for this element, computed
389 u32 dlist_size; /* Number of dwords allocated for the display list */
390 u32 dlist_count; /* Number of used dwords in the display list. */
558 * @current_dlist: Start offset of the display list currentl
[all...]
/linux-master/drivers/gpu/drm/ttm/
ttm_device.c
128 * buffer object on the global::swap_lru list.
263 pr_debug("Swap list %d was clean\n", i);
272 struct list_head *list)
277 while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
271 ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev, struct list_head *list) argument
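
The while loop at ttm_device.c line 277 drains a list head-first using list_first_entry_or_null(), which returns NULL once the list is empty. A minimal sketch of the same idiom with hypothetical names (the xe_ttm_vram_mgr.c hit above uses the same helper):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_res {
            struct list_head lru;
    };

    /* Take entries head-first until the list is empty;
     * list_first_entry_or_null() returns NULL on an empty list. */
    static void demo_drain(struct list_head *head)
    {
            struct demo_res *res;

            while ((res = list_first_entry_or_null(head, struct demo_res, lru))) {
                    list_del(&res->lru);
                    kfree(res);
            }
    }
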
/linux-master/drivers/gpu/drm/tests/
drm_buddy_test.c
285 * Loop over both lists at the end checking that the dirty list
298 struct list_head *list; local
302 list = &dirty;
305 list = &clean;
310 ps, ps, list,
350 struct list_head *list; local
354 list = &dirty;
356 list = &clean;
359 ps, ps, list, 0),
419 * right. We can then free a list t
427 struct list_head *list; local
[all...]
/linux-master/drivers/gpu/drm/radeon/
radeon_pm.c
157 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
radeon_prime.c
64 list_add_tail(&bo->list, &rdev->gem.objects);

Completed in 267 milliseconds
