Lines Matching refs:seq

57  * @seq: sequence number to write
62 static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
66 *drv->cpu_addr = cpu_to_le32(seq);
68 WREG32(drv->scratch_reg, seq);
84 u32 seq = 0;
87 seq = le32_to_cpu(*drv->cpu_addr);
89 seq = RREG32(drv->scratch_reg);
91 return seq;
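
The two helpers above (lines 62-91) show the driver's dual path for the 32-bit hardware sequence value: a GPU-written write-back slot in system memory when one is available, with an MMIO scratch register as the fallback. A host-side sketch of that pattern, keeping only the cpu_addr/scratch_reg roles visible in the fragments (everything else here is a stand-in):

        #include <stdint.h>

        struct fence_drv_sketch {
                volatile uint32_t *cpu_addr; /* write-back slot, NULL if absent */
                uint32_t scratch_val;        /* models WREG32/RREG32 on scratch_reg */
        };

        static void fence_write_sketch(struct fence_drv_sketch *drv, uint32_t seq)
        {
                if (drv->cpu_addr != NULL)
                        *drv->cpu_addr = seq;   /* real code: cpu_to_le32(seq) */
                else
                        drv->scratch_val = seq; /* real code: WREG32(scratch_reg) */
        }

        static uint32_t fence_read_sketch(struct fence_drv_sketch *drv)
        {
                if (drv->cpu_addr != NULL)
                        return *drv->cpu_addr;  /* real code: le32_to_cpu(...) */
                return drv->scratch_val;
        }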
115 (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
118 CTR2(KTR_DRM, "radeon fence: emit (ring=%d, seq=%ju)", ring, (uintmax_t)(*fence)->seq);
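
Line 115 is the emit side: each ring carries a monotonically growing 64-bit counter, sync_seq[ring], and a new fence takes the pre-incremented value, so real sequence numbers start at 1. A minimal sketch (the surrounding locking and the GPU command that eventually stores the low 32 bits back are elided):

        #include <stdint.h>

        static uint64_t sync_seq;            /* per ring in the driver */

        static uint64_t fence_emit_sketch(void)
        {
                uint64_t seq = ++sync_seq;   /* fence owns the next number */
                /* the driver then queues a GPU command that writes
                 * (uint32_t)seq through the path at lines 66/68 */
                return seq;
        }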
133 uint64_t seq, last_seq, last_emitted;
148 * value the other process set as last seq must be higher than
149 * the seq value we just read, which means that the current process
156 * seq but to an older one.
161 seq = radeon_fence_read(rdev, ring);
162 seq |= last_seq & 0xffffffff00000000LL;
163 if (seq < last_seq) {
164 seq &= 0xffffffff;
165 seq |= last_emitted & 0xffffffff00000000LL;
168 if (seq <= last_seq || seq > last_emitted) {
173 * seq we just read is different from the previous one.
176 last_seq = seq;
180 * seq than the current real last seq as signaled
185 } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
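
The fragments at lines 161-185 are the heart of the scheme: the hardware only holds a 32-bit sequence value, and the driver splices it onto its 64-bit bookkeeping. The upper bits of last_seq are assumed first; if the result went backwards, the 32-bit counter wrapped, so the upper bits of last_emitted are borrowed instead; anything outside (last_seq, last_emitted] means no progress or a bogus read. A self-contained model of that splice (function and variable names are mine, not the driver's):

        #include <stdint.h>
        #include <stdio.h>

        static uint64_t extend_seq(uint64_t last_seq, uint64_t last_emitted,
                                   uint32_t hw_seq)
        {
                uint64_t seq = hw_seq;

                seq |= last_seq & 0xffffffff00000000ULL;   /* assume same epoch */
                if (seq < last_seq) {                      /* went backwards: wrap */
                        seq &= 0xffffffffULL;
                        seq |= last_emitted & 0xffffffff00000000ULL;
                }
                if (seq <= last_seq || seq > last_emitted) /* no progress/bogus */
                        return last_seq;
                return seq;
        }

        int main(void)
        {
                /* wrap case: software at 0xfffffff0, hardware already shows 5 */
                uint64_t s = extend_seq(0xfffffff0ULL, 0x100000005ULL, 5);
                printf("0x%llx\n", (unsigned long long)s); /* 0x100000005 */
                return 0;
        }

The atomic64_xchg at line 185 then publishes the result without a lock: if another thread raced ahead and stored a larger value in last_seq, the loop re-reads rather than letting last_seq move backwards.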
210 * @seq: sequence number
221 u64 seq, unsigned ring)
223 if (atomic_load_acq_64(&rdev->fence_drv[ring].last_seq) >= seq) {
228 if (atomic_load_acq_64(&rdev->fence_drv[ring].last_seq) >= seq) {
247 if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
250 if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
251 fence->seq = RADEON_FENCE_SIGNALED_SEQ;
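
Lines 247-251 add a cheap fast path: once a fence has been observed signaled, its seq field is overwritten with the sentinel RADEON_FENCE_SIGNALED_SEQ so repeat queries skip the hardware read entirely. Since emit pre-increments (line 115), real sequence numbers never take the value 0; I am assuming the sentinel is 0, which matches the radeon sources I know, but treat that as an assumption:

        #include <stdbool.h>
        #include <stdint.h>

        #define SIGNALED_SEQ 0ULL               /* assumed sentinel value */

        struct fence_sketch { uint64_t seq; };

        static bool seq_signaled(uint64_t seq)  /* stand-in hardware check */
        {
                return seq <= 100;              /* pretend seqs up to 100 retired */
        }

        static bool fence_signaled_sketch(struct fence_sketch *f)
        {
                if (f->seq == SIGNALED_SEQ)
                        return true;            /* answered already: cheap path */
                if (seq_signaled(f->seq)) {
                        f->seq = SIGNALED_SEQ;  /* cache the verdict, line 251 */
                        return true;
                }
                return false;
        }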
279 uint64_t seq;
299 seq = atomic_load_acq_64(&rdev->fence_drv[ring].last_seq);
303 CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, seq=%ju)",
304 ring, (uintmax_t)seq);
342 CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, seq=%ju)",
343 ring, (uintmax_t)seq);
355 if (seq != atomic_load_acq_64(&rdev->fence_drv[ring].last_seq)) {
374 (uintmax_t)target_seq, (uintmax_t)seq);
417 r = radeon_fence_wait_seq(fence->rdev, fence->seq,
422 fence->seq = RADEON_FENCE_SIGNALED_SEQ;
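
Lines 299-374 sketch the wait loop's lockup heuristic: the waiter snapshots last_seq before sleeping (line 299), and if the snapshot still matches after the timeout (line 355), the ring made no progress at all while we slept and the wait can be abandoned as a lockup; any movement means fences are still retiring and the loop goes around again. A compact model (all names are stand-ins, and the stub sleep replaces the driver's interruptible timed sleep):

        #include <stdbool.h>
        #include <stdint.h>

        static uint64_t last_seq;                 /* models fence_drv.last_seq */

        static bool seq_signaled(uint64_t target) { return last_seq >= target; }
        static uint64_t read_last_seq(void)       { return last_seq; }
        static void timed_sleep(void)             { /* models the timed sleep */ }

        static bool wait_seq_sketch(uint64_t target_seq)
        {
                while (!seq_signaled(target_seq)) {
                        uint64_t before = read_last_seq(); /* snapshot */
                        timed_sleep();
                        if (before != read_last_seq())
                                continue;  /* fences retired meanwhile: not hung */
                        return false;      /* zero progress: report a lockup */
                }
                return true;
        }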
426 static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
431 if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
604 uint64_t seq[RADEON_NUM_RINGS];
609 seq[i] = 0;
615 if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
620 seq[i] = fences[i]->seq;
623 r = radeon_fence_wait_any_seq(rdev, seq, intr);
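
Lines 604-623 feed the any-of variant at line 426: the caller builds one target seq per ring, with 0 meaning "no fence of interest on this ring", and already-signaled fences (the sentinel check at line 615) short-circuit before any waiting starts. The scan itself, modeled with a stand-in ring count (RADEON_NUM_RINGS' actual value differs across driver versions):

        #include <stdbool.h>
        #include <stdint.h>

        #define NUM_RINGS 5                     /* stand-in for RADEON_NUM_RINGS */

        static uint64_t last_seq[NUM_RINGS];    /* models per-ring last_seq */

        static bool ring_seq_signaled(uint64_t seq, unsigned ring)
        {
                return last_seq[ring] >= seq;   /* models radeon_fence_seq_signaled */
        }

        static bool any_seq_signaled_sketch(const uint64_t seq[NUM_RINGS])
        {
                for (unsigned i = 0; i < NUM_RINGS; i++) {
                        if (seq[i] && ring_seq_signaled(seq[i], i))
                                return true;    /* 0 entries are "don't care" */
                }
                return false;
        }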
642 uint64_t seq;
644 seq = atomic_load_acq_64(&rdev->fence_drv[ring].last_seq) + 1ULL;
645 if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
650 return radeon_fence_wait_seq(rdev, seq, ring, false, false);
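
radeon_fence_wait_next (lines 642-650) targets exactly last_seq + 1, the oldest fence not yet seen retired; if that number was never emitted (seq >= sync_seq), nothing is outstanding and the function bails out rather than waiting forever. A model of just that decision:

        #include <errno.h>
        #include <stdint.h>

        static int wait_next_sketch(uint64_t last_seq, uint64_t sync_seq)
        {
                uint64_t seq = last_seq + 1ULL;  /* oldest fence not yet retired */

                if (seq >= sync_seq)             /* nothing newer was emitted */
                        return -ENOENT;
                /* real code: radeon_fence_wait_seq(rdev, seq, ring, false, false) */
                return 0;
        }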
665 uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
668 r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
764 if (fence->seq <= fdrv->sync_seq[fence->ring]) {
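
Line 764 is from radeon_fence_need_sync: the destination ring records, per source ring, the newest sequence number it has already synchronized against, so a fence at or below that mark needs no new semaphore wait. Reduced to its comparison (parameter names are mine):

        #include <stdbool.h>
        #include <stdint.h>

        /* dst_synced[i] models fdrv->sync_seq[i]: the newest seq from ring i
         * that the destination ring already placed a semaphore wait on */
        static bool need_sync_sketch(uint64_t fence_seq, unsigned fence_ring,
                                     const uint64_t *dst_synced)
        {
                return fence_seq > dst_synced[fence_ring];
        }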