Searched refs:fence (Results 1 - 25 of 45) sorted by relevance

/freebsd-11-stable/sys/riscv/include/
atomic.h
42 #define fence() __asm __volatile("fence" ::: "memory"); macro
43 #define mb() fence()
44 #define rmb() fence()
45 #define wmb() fence()
52 fence(); \
58 fence(); \
200 fence();
209 fence();
221 fence();
[all...]
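
The atomic.h hits above map all three barrier flavors (mb, rmb, wmb) to the single RISC-V "fence" instruction. A minimal sketch of how such macros order a producer/consumer handshake, assuming a RISC-V GCC/Clang target; the publish/consume pair is illustrative, not from the tree:

#include <stdint.h>

/* Macro definitions follow the atomic.h hits above (sketch form). */
#define fence() __asm __volatile("fence" ::: "memory")
#define mb()    fence()
#define rmb()   fence()
#define wmb()   fence()

static volatile uint32_t payload;
static volatile uint32_t ready;

/* Producer: publish the payload before raising the flag. */
static void
publish(uint32_t value)
{
        payload = value;
        wmb();          /* order the data store before the flag store */
        ready = 1;
}

/* Consumer: observe the flag before reading the payload. */
static uint32_t
consume(void)
{
        while (ready == 0)
                ;       /* spin until the producer raises the flag */
        rmb();          /* order the flag load before the data load */
        return (payload);
}
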
/freebsd-11-stable/sys/dev/drm2/radeon/
radeon_fence.c
45 * for GPU/CPU synchronization. When the fence is written,
46 * it is expected that all buffers associated with that fence
54 * radeon_fence_write - write a fence value
58 * @ring: ring index the fence is associated with
60 * Writes a fence value to memory or a scratch register (all asics).
73 * radeon_fence_read - read a fence value
76 * @ring: ring index the fence is associated with
78 * Reads a fence value from memory or a scratch register (all asics).
79 * Returns the value of the fence read from memory or register.
95 * radeon_fence_emit - emit a fence on the requested ring
104 radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring) argument
200 radeon_fence_destroy(struct radeon_fence *fence) argument
242 radeon_fence_signaled(struct radeon_fence *fence) argument
408 radeon_fence_wait(struct radeon_fence *fence, bool intr) argument
687 radeon_fence_ref(struct radeon_fence *fence) argument
700 radeon_fence_unref(struct radeon_fence **fence) argument
750 radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring) argument
780 radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring) argument
[all...]
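
The radeon_fence.c entry points above follow an emit/wait/unref lifecycle. A hedged sketch of that flow, using the signatures quoted from radeon.h further down in these results; run_and_wait() and the elided work submission are illustrative, not driver code, and the sketch assumes the in-tree "radeon.h" header:

#include "radeon.h"

static int
run_and_wait(struct radeon_device *rdev, int ring)
{
        struct radeon_fence *fence = NULL;
        int r;

        /* ... queue GPU work on `ring` here (e.g. a blit or DMA copy) ... */

        r = radeon_fence_emit(rdev, &fence, ring);  /* fence the work */
        if (r)
                return (r);

        r = radeon_fence_wait(fence, false);  /* false: not interruptible */
        radeon_fence_unref(&fence);           /* drop our reference */
        return (r);
}
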
radeon_sa.c
41 * If we are asked to block we wait on all the oldest fence of all
42 * rings. We just wait for any of those fence to complete.
156 radeon_fence_unref(&sa_bo->fence);
169 if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
227 * Check if either there is a fence we can wait for or
270 /* go over all fence list and try to find the closest sa_bo
283 if (!radeon_fence_signaled(sa_bo->fence)) {
284 fences[i] = sa_bo->fence;
307 ++tries[best_bo->fence->ring];
384 radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo, struct radeon_fence *fence) argument
[all...]
radeon_benchmark.c
45 struct radeon_fence *fence = NULL; local
54 &fence);
59 &fence);
67 r = radeon_fence_wait(fence, false);
70 radeon_fence_unref(&fence);
76 if (fence)
77 radeon_fence_unref(&fence);
radeon_gart.c
527 radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
537 radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
655 radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
686 * Returns the fence we need to sync to (if any).
698 if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
706 struct radeon_fence *fence = rdev->vm_manager.active[i]; local
708 if (fence == NULL) {
714 if (radeon_fence_is_earlier(fence, best[fence->ring])) {
744 radeon_vm_fence(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_fence *fence) argument
[all...]
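
The radeon_gart.c hits above scan the table of active VM fences for the one that retired (or will retire) earliest, i.e. the cheapest slot to steal. A simplified sketch assuming only the radeon_fence_is_earlier() call and the vm_manager.active[] array visible in the hits; pick_earliest_active() and the nvm bound are illustrative, and the real code additionally tracks a best fence per ring:

#include "radeon.h"

static struct radeon_fence *
pick_earliest_active(struct radeon_device *rdev, unsigned nvm)
{
        struct radeon_fence *best = NULL;
        unsigned i;

        for (i = 0; i < nvm; i++) {
                struct radeon_fence *fence = rdev->vm_manager.active[i];

                if (fence == NULL)      /* unused slot beats any fence */
                        return (NULL);
                if (best == NULL || radeon_fence_is_earlier(fence, best))
                        best = fence;   /* earliest fence seen so far */
        }
        return (best);
}
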
radeon_test.c
42 struct radeon_fence *fence = NULL; local
129 r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
131 r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
137 r = radeon_fence_wait(fence, false);
139 DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
143 radeon_fence_unref(&fence);
175 r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
177 r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
183 r = radeon_fence_wait(fence, false);
185 DRM_ERROR("Failed to wait for VRAM->GTT fence
[all...]
radeon_semaphore.c
112 struct radeon_fence *fence)
121 radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
110 radeon_semaphore_free(struct radeon_device *rdev, struct radeon_semaphore **semaphore, struct radeon_fence *fence) argument
radeon_ring.c
82 ib->fence = NULL;
110 radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
111 radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
112 radeon_fence_unref(&ib->fence);
148 /* 64 dwords should be enough for fence too */
155 struct radeon_fence *fence = ib->sync_to[i]; local
156 if (radeon_fence_need_sync(fence, ib->ring)) {
159 fence->ring, ib->ring);
160 radeon_fence_note_sync(fence, ib->ring);
177 r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
[all...]
radeon_asic.h
80 struct radeon_fence *fence);
92 struct radeon_fence **fence);
156 struct radeon_fence **fence);
169 struct radeon_fence *fence);
312 struct radeon_fence *fence);
318 struct radeon_fence *fence);
338 unsigned num_gpu_pages, struct radeon_fence **fence);
341 unsigned num_gpu_pages, struct radeon_fence **fence);
387 struct radeon_fence **fence, struct radeon_sa_bo **vb,
389 void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
[all...]
radeon.h
126 /* fence seq are set to this number when signaled */
246 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
248 bool radeon_fence_signaled(struct radeon_fence *fence);
249 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
255 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
256 void radeon_fence_unref(struct radeon_fence **fence);
258 bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
259 void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
414 struct radeon_fence *fence; member in struct:radeon_sa_bo
463 struct radeon_fence *fence);
553 struct radeon_fence *fence; member in struct:radeon_unpin_work
638 struct radeon_fence *fence; member in struct:radeon_ib
703 struct radeon_fence *fence; member in struct:radeon_vm
[all...]
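
The radeon.h declarations above, together with the radeon_ring.c loop quoted earlier, suggest the inter-ring pattern: an IB keeps one pending fence per ring in sync_to[], and before execution each entry is checked with radeon_fence_need_sync() and recorded with radeon_fence_note_sync(). A sketch under those assumptions; note_ib_syncs() and the num_rings bound are illustrative, and the quoted loop additionally performs a semaphore sync step omitted here:

#include "radeon.h"

static void
note_ib_syncs(struct radeon_ib *ib, int num_rings)
{
        int i;

        for (i = 0; i < num_rings; i++) {
                struct radeon_fence *fence = ib->sync_to[i];

                /* The quoted loop relies on need_sync() rejecting
                 * NULL and same-ring fences. */
                if (radeon_fence_need_sync(fence, ib->ring))
                        radeon_fence_note_sync(fence, ib->ring);
        }
}
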
radeon_cs.c
138 struct radeon_fence *fence)
142 if (!fence)
145 other = p->ib.sync_to[fence->ring];
146 p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
326 parser->ib.fence);
489 radeon_cs_sync_to(parser, vm->fence);
500 radeon_vm_fence(rdev, vm, parser->ib.fence);
137 radeon_cs_sync_to(struct radeon_cs_parser *p, struct radeon_fence *fence) argument
r600_blit_kms.c
514 rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
666 struct radeon_fence **fence, struct radeon_sa_bo **vb,
706 if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
707 radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring,
709 radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
719 void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence, argument
725 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
732 radeon_sa_bo_free(rdev, &vb, *fence);
733 radeon_semaphore_free(rdev, &sem, *fence);
665 r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages, struct radeon_fence **fence, struct radeon_sa_bo **vb, struct radeon_semaphore **sem) argument
r200.c
91 struct radeon_fence **fence)
125 if (fence) {
126 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
87 r200_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence) argument
radeon_object.h
186 struct radeon_fence *fence);
r600.c
2561 struct radeon_fence *fence)
2563 struct radeon_ring *ring = &rdev->ring[fence->ring];
2566 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2580 radeon_ring_write(ring, fence->seq);
2597 /* Emit fence sequence & fire IRQ */
2599 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2600 radeon_ring_write(ring, fence->seq);
2628 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
2631 * @fence: radeon fence object
2560 r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) argument
2637 r600_dma_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) argument
2676 r600_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence) argument
2708 r600_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence) argument
[all...]
radeon_ttm.c
227 struct radeon_fence *fence; local
265 fence = bo->sync_obj;
268 &fence);
270 r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
272 radeon_fence_unref(&fence);
rv770.c
900 * @fence: radeon fence object
909 struct radeon_fence **fence)
933 if (radeon_fence_need_sync(*fence, ring->idx)) {
934 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
936 radeon_fence_note_sync(*fence, ring->idx);
955 r = radeon_fence_emit(rdev, fence, ring->idx);
962 radeon_semaphore_free(rdev, &sem, *fence);
906 rv770_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence) argument
/freebsd-11-stable/sys/riscv/riscv/
cpufunc_asm.S
91 fence.i
100 fence.i
db_interface.c
154 fence();
/freebsd-11-stable/sys/dev/hyperv/vmbus/amd64/
hyperv_machdep.c
135 #define HYPERV_TSC_TIMECOUNT(fence) \
137 hyperv_tc64_tsc_##fence(void) \
147 fence(); \
167 hyperv_tsc_timecount_##fence(struct timecounter *tc __unused) \
170 return (hyperv_tc64_tsc_##fence()); \
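
HYPERV_TSC_TIMECOUNT above stamps out one timecounter reader per serializing instruction by token-pasting the fence name. A standalone x86 sketch of the same technique; DEFINE_TSC_READER and the read_tsc_* names are illustrative, not the Hyper-V code:

#include <stdint.h>

#define lfence() __asm __volatile("lfence" ::: "memory")
#define mfence() __asm __volatile("mfence" ::: "memory")

static inline uint64_t
rdtsc(void)
{
        uint32_t lo, hi;

        __asm __volatile("rdtsc" : "=a" (lo), "=d" (hi));
        return (((uint64_t)hi << 32) | lo);
}

/* One macro, one reader per fence flavor via token pasting. */
#define DEFINE_TSC_READER(fence)                                \
static uint64_t                                                 \
read_tsc_##fence(void)                                          \
{                                                               \
        fence();        /* keep rdtsc from moving earlier */    \
        return (rdtsc());                                       \
}

DEFINE_TSC_READER(lfence)       /* defines read_tsc_lfence() */
DEFINE_TSC_READER(mfence)       /* defines read_tsc_mfence() */
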
/freebsd-11-stable/contrib/llvm-project/llvm/lib/Support/
Atomic.cpp
39 # error No memory fence implementation for your platform!
/freebsd-11-stable/sys/dev/ed/
if_ed_hpp.c
445 uint32_t *const fence = dl + (amount >> 2); local
451 while (dl < fence)
464 u_short *const fence = d + (amount >> 1); local
467 while (d < fence)
524 u_short *const fence = s + (len >> 1); local
536 while (s < fence)
604 uint32_t *fence = sl + (len >> 2); local
606 while (sl < fence)
615 u_short *fence = s + (len >> 1); local
617 while (s < fence)
[all...]
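
In the if_ed_hpp.c hits, "fence" is not a memory barrier at all: it is a one-past-the-end bound pointer for a word-copy loop. A standalone sketch of the idiom:

#include <stddef.h>
#include <stdint.h>

static void
copy_words(uint32_t *dst, const uint32_t *src, size_t nbytes)
{
        const uint32_t *const fence = src + (nbytes >> 2);

        while (src < fence)     /* stop when src reaches the fence */
                *dst++ = *src++;
}
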
/freebsd-11-stable/sys/dev/ioat/
ioat_internal.h
132 uint32_t fence:1; member in struct:generic_dma_control
165 uint32_t fence:1; member in struct:ioat_dma_hw_descriptor::__anon4523::__anon4524
196 uint32_t fence:1; member in struct:ioat_fill_hw_descriptor::__anon4525::__anon4526
224 uint32_t fence:1; member in struct:ioat_crc32_hw_descriptor::__anon4527::__anon4528
303 uint32_t fence:1; member in struct:ioat_xor_hw_descriptor::__anon4529::__anon4530
341 uint32_t fence:1; member in struct:ioat_pq_hw_descriptor::__anon4531::__anon4532
383 uint32_t fence:1; member in struct:ioat_pq_update_hw_descriptor::__anon4533::__anon4534
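
The ioat hits show a one-bit "fence" flag in each hardware DMA descriptor's control word; setting it tells the engine to complete that descriptor before starting later ones. A sketch of the pattern; the field layout is abbreviated and illustrative, not the exact ioat layout:

#include <stdint.h>

struct dma_control_sketch {
        uint32_t int_enable:1;
        uint32_t completion_update:1;
        uint32_t fence:1;       /* order: finish before later descriptors */
        uint32_t reserved:29;
};

static void
mark_ordering_point(struct dma_control_sketch *ctl)
{
        ctl->fence = 1;         /* hardware must not run past this one */
}
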
/freebsd-11-stable/contrib/gdb/gdb/
alpha-tdep.c
912 CORE_ADDR fence = pc - heuristic_fence_post;
927 || fence < tdep->vm_min_address)
928 fence = tdep->vm_min_address;
933 for (pc -= 4; pc >= fence; pc -= 4)
959 if (fence == tdep->vm_min_address)
962 warning ("Hit heuristic-fence-post without finding");
970 increase the size of the search with the `set heuristic-fence-post' command.\n\
1238 callable as an sfunc. Used by the "set heuristic-fence-post" command. */
1609 /* Let the user set the fence post for heuristic_proc_start. */
1614 c = add_set_cmd ("heuristic-fence
905 CORE_ADDR fence = pc - heuristic_fence_post; local
[all...]
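
The alpha-tdep.c hits use a "fence post" to bound gdb's heuristic backward scan for a function prologue: walk instructions backwards from pc, giving up once the fence is reached. A simplified sketch of that scan; looks_like_prologue() is a stand-in for the real instruction decoding:

#include <stdint.h>

typedef uint64_t CORE_ADDR;

/* Stand-in: real gdb decodes the instruction at pc. */
static int
looks_like_prologue(CORE_ADDR pc)
{
        (void)pc;
        return (0);
}

static CORE_ADDR
heuristic_proc_start(CORE_ADDR pc, CORE_ADDR fence_post, CORE_ADDR vm_min)
{
        CORE_ADDR fence = pc - fence_post;

        /* Clamp to the lowest mapped address, guarding the unsigned
         * wrap when fence_post > pc. */
        if (fence > pc || fence < vm_min)
                fence = vm_min;

        for (pc -= 4; pc >= fence; pc -= 4)     /* fixed 4-byte insns */
                if (looks_like_prologue(pc))
                        return (pc);

        return (0);     /* hit the fence post without finding one */
}
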
/freebsd-11-stable/sys/x86/iommu/
intel_qi.c
161 bool memw, bool fence)
168 (fence ? DMAR_IQ_DESCR_WAIT_FN : 0) |
160 dmar_qi_emit_wait_descr(struct dmar_unit *unit, uint32_t seq, bool intr, bool memw, bool fence) argument

Completed in 184 milliseconds
