Searched refs:fence (Results 1 - 25 of 45) sorted by path


/freebsd-11-stable/contrib/gdb/gdb/
alpha-tdep.c
912 CORE_ADDR fence = pc - heuristic_fence_post;
927 || fence < tdep->vm_min_address)
928 fence = tdep->vm_min_address;
933 for (pc -= 4; pc >= fence; pc -= 4)
959 if (fence == tdep->vm_min_address)
962 warning ("Hit heuristic-fence-post without finding");
970 increase the size of the search with the `set heuristic-fence-post' command.\n\
1238 callable as an sfunc. Used by the "set heuristic-fence-post" command. */
1609 /* Let the user set the fence post for heuristic_proc_start. */
1614 c = add_set_cmd ("heuristic-fence
905 CORE_ADDR fence = pc - heuristic_fence_post; local
[all...]
mips-tdep.c
1864 CORE_ADDR fence; local
1870 fence = start_pc - heuristic_fence_post;
1874 if (heuristic_fence_post == UINT_MAX || fence < VM_MIN_ADDRESS)
1875 fence = VM_MIN_ADDRESS;
1881 if (start_pc < fence)
1913 heuristic-fence-post' command.\n", paddr_nz (pc), paddr_nz (pc));
6135 /* Let the user turn off floating point and set the fence post for
6165 c = add_set_cmd ("heuristic-fence-post", class_support, var_zinteger,
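
The gdb hits above all follow one pattern: with no symbol information, heuristic_proc_start() scans backwards from the current pc looking for a function prologue, stopping at a fence computed from the user-settable heuristic-fence-post value and clamped to the minimum mapped address. A minimal sketch of that bounded scan, assuming a hypothetical looks_like_prologue() check in place of the per-architecture instruction decoding:

    /* Illustrative sketch only: the fence/clamp logic mirrors the snippets
       above, but looks_like_prologue() and the surrounding scaffolding are
       assumptions, not the gdb sources verbatim.  */
    #include <limits.h>

    typedef unsigned long CORE_ADDR;

    static unsigned int heuristic_fence_post = 0x10000;   /* user-settable */

    static CORE_ADDR
    heuristic_proc_start_sketch (CORE_ADDR pc, CORE_ADDR vm_min_address,
                                 int (*looks_like_prologue) (CORE_ADDR))
    {
      CORE_ADDR fence = pc - heuristic_fence_post;

      /* Clamp the fence so the scan never runs below the lowest mapped
         address (or wraps when the subtraction underflows).  */
      if (heuristic_fence_post == UINT_MAX || fence < vm_min_address)
        fence = vm_min_address;

      /* Walk backwards one 4-byte instruction at a time until a prologue
         is recognised or the fence is hit.  */
      for (pc -= 4; pc >= fence; pc -= 4)
        if (looks_like_prologue (pc))
          return pc;

      /* The caller warns and suggests `set heuristic-fence-post' (see above).  */
      return 0;
    }
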
/freebsd-11-stable/contrib/ofed/libmlx5/
qp.c
633 uint8_t fence; local
668 fence = MLX5_WQE_CTRL_FENCE;
670 fence = next_fence;
676 ctrl->fm_ce_se = qp->sq_signal_bits | fence |
/freebsd-11-stable/sys/dev/drm/
mga_state.c
1081 u32 *fence = data; local
1091 /* I would normal do this assignment in the declaration of fence,
1095 *fence = dev_priv->next_fence_to_post;
1111 u32 *fence = data; local
1120 mga_driver_fence_wait(dev, fence);
/freebsd-11-stable/sys/dev/drm2/i915/
i915_debug.c
124 seq_printf(m, " (fence: %d)", obj->fence_reg);
603 seq_printf(m, " (fence: %d)", err->fence_reg);
678 seq_printf(m, " fence[%d] = %08jx\n", i,
679 (uintmax_t)error->fence[i]);
i915_drv.h
217 u64 fence[I915_MAX_NUM_FENCES]; member in struct:drm_i915_error_state
818 /** LRU list of objects with fence regs on them. */
1019 * Whether the tiling parameters for the currently associated fence
1023 * command (such as BLT on gen2/3), as a "fence".
1055 * Is the GPU currently using a fence to access this buffer,
1215 * rows, which changed the alignment requirements and fence programming.
i915_gem.c
85 struct drm_i915_fence_reg *fence,
107 /* As we do not have an associated fence register, we will force
1464 * the GTT (if needed), allocating and programming a fence register (again,
1469 * from the GTT and/or fence registers to make room. So performance may
1470 * suffer if the GTT working set is large or there are few fence registers
1630 * object through the GTT and then lose the fence register due to
1678 /* Previous chips need a power-of-two fence region when tiling */
1695 * potential fence register mapping.
1704 * if a fence register is needed for the object.
1712 * fence registe
2931 fence_number(struct drm_i915_private *dev_priv, struct drm_i915_fence_reg *fence) argument
2942 i915_gem_object_update_fence(struct drm_i915_gem_object *obj, struct drm_i915_fence_reg *fence, bool enable) argument
[all...]
i915_irq.c
918 /* Simply ignore tiling or any overlapping fence.
1062 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1067 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1072 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
1075 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
/freebsd-11-stable/sys/dev/drm2/radeon/
ni.c
922 struct radeon_fence *fence)
924 struct radeon_ring *ring = &rdev->ring[fence->ring];
925 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
941 radeon_ring_write(ring, fence->seq);
921 cayman_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) argument
r100.c
848 struct radeon_fence *fence)
850 struct radeon_ring *ring = &rdev->ring[fence->ring];
866 /* Emit fence sequence & fire IRQ */
867 radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
868 radeon_ring_write(ring, fence->seq);
886 struct radeon_fence **fence)
904 /* Ask for enough room for blit + flush + fence */
949 if (fence) {
950 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
3825 r = radeon_fence_wait(ib.fence, fals
847 r100_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) argument
882 r100_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence) argument
[all...]
r200.c
91 struct radeon_fence **fence)
125 if (fence) {
126 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
87 r200_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence) argument
r300.c
179 struct radeon_fence *fence)
181 struct radeon_ring *ring = &rdev->ring[fence->ring];
205 /* Emit fence sequence & fire IRQ */
206 radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
207 radeon_ring_write(ring, fence->seq);
178 r300_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) argument
r600.c
2561 struct radeon_fence *fence)
2563 struct radeon_ring *ring = &rdev->ring[fence->ring];
2566 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2580 radeon_ring_write(ring, fence->seq);
2597 /* Emit fence sequence & fire IRQ */
2599 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2600 radeon_ring_write(ring, fence->seq);
2628 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
2631 * @fence: radeon fence objec
2560 r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) argument
2637 r600_dma_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) argument
2676 r600_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence) argument
2708 r600_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence) argument
[all...]
radeon.h
126 /* fence seq are set to this number when signaled */
246 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
248 bool radeon_fence_signaled(struct radeon_fence *fence);
249 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
255 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
256 void radeon_fence_unref(struct radeon_fence **fence);
258 bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
259 void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
414 struct radeon_fence *fence; member in struct:radeon_sa_bo
463 struct radeon_fence *fence);
553 struct radeon_fence *fence; member in struct:radeon_unpin_work
638 struct radeon_fence *fence; member in struct:radeon_ib
703 struct radeon_fence *fence; member in struct:radeon_vm
[all...]
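
Read together with the callers in radeon_benchmark.c and radeon_test.c further down, the declarations above outline the fence life cycle: a fence is emitted when work is queued, waited on, then unreferenced. A rough usage sketch under those assumptions (a fragment only, with hypothetical src_gpu_addr/dst_gpu_addr placeholders and abbreviated error handling, not a verbatim driver excerpt):

    /* Sketch of the emit/wait/unref pattern visible in the callers below;
       src_gpu_addr, dst_gpu_addr and out_cleanup are placeholders.  */
    struct radeon_fence *fence = NULL;
    int r;

    /* Queue a blit; the ASIC hook emits a fence on the GFX ring for it.  */
    r = radeon_copy_blit(rdev, src_gpu_addr, dst_gpu_addr,
                         size / RADEON_GPU_PAGE_SIZE, &fence);
    if (r)
            goto out_cleanup;

    /* Block (non-interruptibly) until the GPU signals the fence...  */
    r = radeon_fence_wait(fence, false);
    if (r)
            DRM_ERROR("Failed to wait for fence\n");

    /* ...then drop the reference; the pointer is cleared for the caller.  */
    radeon_fence_unref(&fence);
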
radeon_asic.h
80 struct radeon_fence *fence);
92 struct radeon_fence **fence);
156 struct radeon_fence **fence);
169 struct radeon_fence *fence);
312 struct radeon_fence *fence);
318 struct radeon_fence *fence);
338 unsigned num_gpu_pages, struct radeon_fence **fence);
341 unsigned num_gpu_pages, struct radeon_fence **fence);
387 struct radeon_fence **fence, struct radeon_sa_bo **vb,
389 void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
[all...]
radeon_benchmark.c
45 struct radeon_fence *fence = NULL; local
54 &fence);
59 &fence);
67 r = radeon_fence_wait(fence, false);
70 radeon_fence_unref(&fence);
76 if (fence)
77 radeon_fence_unref(&fence);
radeon_cs.c
138 struct radeon_fence *fence)
142 if (!fence)
145 other = p->ib.sync_to[fence->ring];
146 p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
326 parser->ib.fence);
489 radeon_cs_sync_to(parser, vm->fence);
500 radeon_vm_fence(rdev, vm, parser->ib.fence);
137 radeon_cs_sync_to(struct radeon_cs_parser *p, struct radeon_fence *fence) argument
radeon_display.c
285 (work->fence && !radeon_fence_signaled(work->fence))) {
344 radeon_fence_unref(&work->fence);
386 work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
493 radeon_fence_unref(&work->fence);
radeon_fence.c
45 * for GPU/CPU synchronization. When the fence is written,
46 * it is expected that all buffers associated with that fence
54 * radeon_fence_write - write a fence value
58 * @ring: ring index the fence is associated with
60 * Writes a fence value to memory or a scratch register (all asics).
73 * radeon_fence_read - read a fence value
76 * @ring: ring index the fence is associated with
78 * Reads a fence value from memory or a scratch register (all asics).
79 * Returns the value of the fence read from memory or register.
95 * radeon_fence_emit - emit a fence o
104 radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring) argument
200 radeon_fence_destroy(struct radeon_fence *fence) argument
242 radeon_fence_signaled(struct radeon_fence *fence) argument
408 radeon_fence_wait(struct radeon_fence *fence, bool intr) argument
687 radeon_fence_ref(struct radeon_fence *fence) argument
700 radeon_fence_unref(struct radeon_fence **fence) argument
750 radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring) argument
780 radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring) argument
[all...]
radeon_gart.c
527 radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
537 radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
655 radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
686 * Returns the fence we need to sync to (if any).
698 if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
706 struct radeon_fence *fence = rdev->vm_manager.active[i]; local
708 if (fence == NULL) {
714 if (radeon_fence_is_earlier(fence, best[fence
744 radeon_vm_fence(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_fence *fence) argument
[all...]
radeon_object.h
186 struct radeon_fence *fence);
radeon_ring.c
82 ib->fence = NULL;
110 radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
111 radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
112 radeon_fence_unref(&ib->fence);
148 /* 64 dwords should be enough for fence too */
155 struct radeon_fence *fence = ib->sync_to[i]; local
156 if (radeon_fence_need_sync(fence, ib->ring)) {
159 fence->ring, ib->ring);
160 radeon_fence_note_sync(fence, ib->ring);
177 r = radeon_fence_emit(rdev, &ib->fence, i
[all...]
radeon_sa.c
41 * If we are asked to block we wait on all the oldest fence of all
42 * rings. We just wait for any of those fence to complete.
156 radeon_fence_unref(&sa_bo->fence);
169 if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
227 * Check if either there is a fence we can wait for or
270 /* go over all fence list and try to find the closest sa_bo
283 if (!radeon_fence_signaled(sa_bo->fence)) {
284 fences[i] = sa_bo->fence;
307 ++tries[best_bo->fence
384 radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo, struct radeon_fence *fence) argument
[all...]
radeon_semaphore.c
112 struct radeon_fence *fence)
121 radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
110 radeon_semaphore_free(struct radeon_device *rdev, struct radeon_semaphore **semaphore, struct radeon_fence *fence) argument
radeon_test.c
42 struct radeon_fence *fence = NULL; local
129 r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
131 r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
137 r = radeon_fence_wait(fence, false);
139 DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
143 radeon_fence_unref(&fence);
175 r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
177 r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
183 r = radeon_fence_wait(fence, false);
185 DRM_ERROR("Failed to wait for VRAM->GTT fence
[all...]

Completed in 258 milliseconds
