/linux-master/drivers/gpu/drm/xe/
xe_exec.c
    29   * - Passing in a list of BOs which are read / written to, creating implicit syncs
   114   struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
   117   struct xe_sync_entry *syncs = NULL;  [local]
   150   syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
   151   if (!syncs) {
   160   err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
   167   if (xe_sync_is_ufence(&syncs[i]))
   213   fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
   219   xe_sync_entry_signal(&syncs[
   [all...]
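The xe_exec.c hits trace the full lifecycle of a user-supplied sync array in the exec ioctl: read the user pointer out of the args, allocate a kernel-side array, then parse each entry. A minimal sketch of the parse phase, with the trailing flags argument of xe_sync_entry_parse() left as a placeholder (an illustration of the pattern, not the driver's exact code):

    struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
    struct xe_sync_entry *syncs = NULL;
    u32 num_syncs = 0;
    int err;

    if (args->num_syncs) {
            syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
            if (!syncs)
                    return -ENOMEM;
    }

    while (num_syncs < args->num_syncs) {
            /* Copy one struct drm_xe_sync from userspace and resolve its
             * syncobj handle into a kernel-side xe_sync_entry. */
            err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
                                      &syncs_user[num_syncs],
                                      0 /* parse flags: placeholder */);
            if (err)
                    goto err_syncs;
            num_syncs++;
    }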
xe_pt.h
    39   struct xe_sync_entry *syncs, u32 num_syncs,
    44   struct xe_sync_entry *syncs, u32 num_syncs);
xe_migrate.h
   104   struct xe_sync_entry *syncs, u32 num_syncs,
xe_vm.c
   807   struct xe_sync_entry *syncs, u32 num_syncs,
  1656   struct xe_sync_entry *syncs, u32 num_syncs,
  1694   first_op ? syncs : NULL,
  1725   xe_sync_entry_signal(&syncs[i], NULL, fence);
  1742   struct xe_sync_entry *syncs, u32 num_syncs,
  1769   first_op ? syncs : NULL,
  1799   xe_sync_entry_signal(&syncs[i], NULL,
  1816   find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)  [argument]
  1821   struct xe_sync_entry *e = &syncs[i];
  1831   struct xe_exec_queue *q, struct xe_sync_entry *syncs,
  definitions:
  1655   xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool first_op, bool last_op)  [argument]
  1741   xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool first_op, bool last_op)  [argument]
  1830   __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool immediate, bool first_op, bool last_op)  [argument]
  1871   xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs, bool immediate, bool first_op, bool last_op)  [argument]
  1891   xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool first_op, bool last_op)  [argument]
  2053   xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, u32 region, struct xe_sync_entry *syncs, u32 num_syncs, bool first_op, bool last_op)  [argument]
  2375   vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, struct drm_gpuva_ops *ops, struct xe_sync_entry *syncs, u32 num_syncs, struct list_head *ops_list, bool last)  [argument]
  2945   vm_bind_ioctl_signal_fences(struct xe_vm *vm, struct xe_exec_queue *q, struct xe_sync_entry *syncs, int num_syncs)  [argument]
  2979   struct xe_sync_entry *syncs = NULL;  [local]
  [all...]
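Several of these hits show the same convention: in a multi-operation bind, only the first operation waits on the caller's in-syncs (the "first_op ? syncs : NULL" pattern at lines 1694/1769) and only the last operation signals the out-syncs (lines 1725/1799). A hypothetical wrapper making that convention explicit (not the driver's actual code; __xe_pt_bind_vma's signature is taken from the xe_pt.c entry below):

    static struct dma_fence *
    bind_one_vma(struct xe_tile *tile, struct xe_vma *vma,
                 struct xe_exec_queue *q,
                 struct xe_sync_entry *syncs, u32 num_syncs,
                 bool first_op, bool last_op)
    {
            struct dma_fence *fence;
            u32 i;

            /* Only the first op of a set waits on the caller's in-syncs. */
            fence = __xe_pt_bind_vma(tile, vma, q,
                                     first_op ? syncs : NULL,
                                     first_op ? num_syncs : 0,
                                     false /* rebind */);
            if (IS_ERR(fence))
                    return fence;

            /* Only the last op signals the caller's out-syncs. */
            if (last_op)
                    for (i = 0; i < num_syncs; i++)
                            xe_sync_entry_signal(&syncs[i], NULL, fence);

            return fence;
    }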
xe_vm_types.h
   313   /** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
   315   /** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
   337   * @syncs: syncs for this operation, only used on first and last
   340   struct xe_sync_entry *syncs;  [member in struct:xe_vma_op]
   341   /** @num_syncs: number of syncs */
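Pieced together, these hits imply the following shape (abridged reconstruction; the enum name and flag values are assumptions, other members elided):

    enum xe_vma_op_flags {
            /* first VMA operation for a set of syncs */
            XE_VMA_OP_FIRST = BIT(0),
            /* last VMA operation for a set of syncs */
            XE_VMA_OP_LAST  = BIT(1),
    };

    struct xe_vma_op {
            /* ... */
            /* syncs for this operation, only used on first and last op */
            struct xe_sync_entry *syncs;
            /* number of syncs */
            u32 num_syncs;
            /* ... */
    };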
xe_migrate.c
  1217   struct xe_sync_entry *syncs, u32 num_syncs)
  1223   fence = syncs[i].fence;
  1250   * @syncs: Array of xe_sync_entry to await before updating. Note that waits
  1252   * @num_syncs: Number of entries in @syncs.
  1275   struct xe_sync_entry *syncs, u32 num_syncs,
  1296   /* Use the CPU if no in syncs and engine is idle */
  1297   if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
  1416   err = xe_sync_entry_add_deps(&syncs[i], job);
  definitions:
  1216   no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs)  [argument]
  1269   xe_migrate_update_pgtables(struct xe_migrate *m, struct xe_vm *vm, struct xe_bo *bo, struct xe_exec_queue *q, const struct xe_vm_pgtable_update *updates, u32 num_updates, struct xe_sync_entry *syncs, u32 num_syncs, struct xe_migrate_pt_update *pt_update)  [argument]
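The hits at lines 1216-1223 and 1296-1297 show why no_in_syncs() exists: the migrate layer may apply page-table updates with the CPU only when there is nothing left to wait on. A simplified sketch of that test (the real helper also takes the vm and exec queue and checks more state than this):

    static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
    {
            u32 i;

            for (i = 0; i < num_syncs; i++) {
                    struct dma_fence *fence = syncs[i].fence;

                    /* Any unsignaled in-fence forces the GPU path. */
                    if (fence && !dma_fence_is_signaled(fence))
                            return false;
            }
            return true;
    }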
xe_pt.c
  1190   * @syncs: Entries to sync on before binding the built tree to the live vm tree.
  1208   struct xe_sync_entry *syncs, u32 num_syncs,
  1281   syncs, num_syncs,
  1555   * @syncs: Entries to sync on before disconnecting the tree to be destroyed.
  1571   struct xe_sync_entry *syncs, u32 num_syncs)
  1630   syncs, num_syncs,
  definitions:
  1207   __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool rebind)  [argument]
  1570   __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs)  [argument]
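The call sites at lines 1281 and 1630 forward @syncs from the PT layer straight into the migrate layer, matching the xe_migrate_update_pgtables() signature listed above. An abridged reconstruction (the entries/num_entries/pt_update variable names here are assumptions):

    /* Hand the awaited syncs through to the page-table update job. */
    fence = xe_migrate_update_pgtables(tile->migrate, vm, bo, q,
                                       entries, num_entries,
                                       syncs, num_syncs,
                                       &pt_update->base);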
/linux-master/include/uapi/drm/
v3d_drm.h
   425   __u64 syncs;  [member in struct:drm_v3d_timestamp_query]
   448   __u64 syncs;  [member in struct:drm_v3d_reset_timestamp_query]
   494   __u64 syncs;  [member in struct:drm_v3d_copy_timestamp_query]
   511   __u64 syncs;  [member in struct:drm_v3d_reset_performance_query]
   563   __u64 syncs;  [member in struct:drm_v3d_copy_performance_query]
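All five v3d query structs carry the same field: a __u64 `syncs` that points at a userspace array of __u32 drm_syncobj handles, one per query. A userspace sketch of filling one of them (handle and offset values hypothetical):

    __u32 offsets[2] = { 0, 8 };            /* offsets into the timestamp BO */
    __u32 handles[2] = { sync_a, sync_b };  /* hypothetical syncobj handles */
    struct drm_v3d_timestamp_query query = {
            .offsets = (uintptr_t)offsets,
            .syncs   = (uintptr_t)handles,
            .count   = 2,
    };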
xe_drm.h
   997   * .syncs = &sync,
  1037   /** @num_syncs: amount of syncs to wait on */
  1040   /** @syncs: pointer to struct drm_xe_sync array */
  1041   __u64 syncs;  [member in struct:drm_xe_vm_bind]
  1229   * .syncs = &sync,
  1247   /** @syncs: Pointer to struct drm_xe_sync array. */
  1248   __u64 syncs;  [member in struct:drm_xe_exec]
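The header's own kerneldoc examples at lines 997 and 1229 chain a single sync into the ioctl args. A fuller userspace sketch along the same lines (queue, handle, and address values hypothetical):

    struct drm_xe_sync sync = {
            .type   = DRM_XE_SYNC_TYPE_SYNCOBJ,
            .flags  = DRM_XE_SYNC_FLAG_SIGNAL,  /* signal on completion */
            .handle = syncobj_handle,           /* hypothetical handle */
    };
    struct drm_xe_exec exec = {
            .exec_queue_id    = queue_id,       /* hypothetical queue */
            .num_syncs        = 1,
            .syncs            = (uintptr_t)&sync,
            .address          = batch_addr,     /* hypothetical GPU address */
            .num_batch_buffer = 1,
    };
    ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);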
/linux-master/drivers/gpu/drm/v3d/
v3d_submit.c
   453   u32 __user *offsets, *syncs;  [local]
   481   syncs = u64_to_user_ptr(timestamp.syncs);
   493   if (copy_from_user(&sync, syncs++, sizeof(sync))) {
   510   u32 __user *syncs;  [local]
   534   syncs = u64_to_user_ptr(reset.syncs);
   541   if (copy_from_user(&sync, syncs++, sizeof(sync))) {
   559   u32 __user *offsets, *syncs;  [local]
   588   syncs
   623   u32 __user *syncs;  [local]
   691   u32 __user *syncs;  [local]
   [all...]
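Each of these handlers repeats one pattern: walk the userspace handle array with copy_from_user() and resolve every __u32 handle via drm_syncobj_find(). A simplified sketch (the `queries` destination array is hypothetical; error unwinding is abridged):

    u32 __user *syncs = u64_to_user_ptr(timestamp.syncs);
    u32 i, sync;

    for (i = 0; i < timestamp.count; i++) {
            if (copy_from_user(&sync, syncs++, sizeof(sync)))
                    return -EFAULT;

            /* Resolve each u32 handle into a drm_syncobj reference. */
            queries[i].syncobj = drm_syncobj_find(file_priv, sync);
            if (!queries[i].syncobj)
                    return -ENOENT;
    }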
/linux-master/drivers/hid/
hid-debug.c
   759   static const char *syncs[3] = {  [variable]
  1057   [EV_SYN] = syncs, [EV_KEY] = keys,
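Here `syncs` is not a fence array at all but a name table for EV_SYN event codes, slotted into a per-event-type lookup table next to tables such as `keys`. A sketch of the pattern (string values hypothetical):

    static const char *keys[KEY_MAX + 1] = { /* EV_KEY names, elided */ };

    static const char *syncs[3] = {
            [SYN_REPORT]    = "Report",
            [SYN_CONFIG]    = "Config",
            [SYN_MT_REPORT] = "MT Report",
    };

    /* Master table: one name array per event type. */
    static const char **names[EV_MAX + 1] = {
            [EV_SYN] = syncs, [EV_KEY] = keys,
            /* ... other event types ... */
    };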