Searched refs:last (Results 276 - 300 of 1032) sorted by path

/linux-master/drivers/gpu/drm/
drm_edid_load.c
58 char *edidname, *last, *colon, *fwstr, *edidstr, *fallback = NULL; local
70 * the last one found as a fallback.
98 last = edidname + strlen(edidname) - 1;
99 if (*last == '\n')
100 *last = '\0';
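
The drm_edid_load.c hits show the usual trim of a trailing newline before the string is used as a firmware name. A minimal standalone sketch of that pattern, with a hypothetical trim_trailing_newline() helper in place of the kernel's parsing flow:

#include <stdio.h>
#include <string.h>

/* Drop a single trailing '\n', pointing `last` at the final character
 * exactly as the snippet above does. */
static void trim_trailing_newline(char *s)
{
        size_t len = strlen(s);
        char *last;

        if (len == 0)
                return;

        last = s + len - 1;
        if (*last == '\n')
                *last = '\0';
}

int main(void)
{
        char name[] = "edid/1280x1024.bin\n";

        trim_trailing_newline(name);
        printf("'%s'\n", name); /* 'edid/1280x1024.bin' */
        return 0;
}

Note the len == 0 guard: the kernel caller only reaches this code with a non-empty name, but a standalone helper must not step before the start of the buffer.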
drm_gpuvm.c
1856 * this call unlinks the last &drm_gpuva from the &drm_gpuvm_bo.
1894 u64 last = addr + range - 1; local
1896 return drm_gpuva_it_iter_first(&gpuvm->rb.tree, addr, last);
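
drm_gpuvm.c converts an (addr, range) pair to an inclusive end, last = addr + range - 1, before handing it to the interval-tree iterator. A small sketch of why the inclusive convention is convenient, using a hypothetical overlaps() check rather than the kernel's generated drm_gpuva_it_* functions:

#include <assert.h>
#include <stdint.h>

/* With inclusive [start, last] intervals, two ranges overlap iff each
 * starts at or before the other's last byte. */
static int overlaps(uint64_t a_start, uint64_t a_last,
                    uint64_t b_start, uint64_t b_last)
{
        return a_start <= b_last && b_start <= a_last;
}

int main(void)
{
        uint64_t addr = 0x1000, range = 0x1000;
        uint64_t last = addr + range - 1; /* 0x1fff, inclusive end */

        /* An adjacent mapping at 0x2000 does not overlap... */
        assert(!overlaps(addr, last, 0x2000, 0x2fff));
        /* ...but one that starts a byte earlier does. */
        assert(overlaps(addr, last, 0x1fff, 0x2fff));
        return 0;
}

The -1 also keeps a range that runs to the very top of the address space from overflowing to zero.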
drm_mm.c
157 __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) argument
160 start, last) ?: (struct drm_mm_node *)&mm->head_node;
382 * @last: last rb member to traverse (either rb_right or rb_left).
389 #define DECLARE_NEXT_HOLE_ADDR(name, first, last) \
399 while (usable_hole_addr(node->last, size)) \
400 node = node->last; \
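
DECLARE_NEXT_HOLE_ADDR in drm_mm.c stamps out two tree walkers from one body by substituting the child-pointer name (rb_right or rb_left) for the `last` parameter. A compilable sketch of that technique over a hypothetical node type; the real macro walks an augmented rbtree of holes:

#include <stddef.h>

/* Hypothetical node: each subtree caches the largest hole beneath it,
 * so the walk can descend only while a big-enough hole exists. */
struct node {
        struct node *rb_left, *rb_right;
        unsigned long max_hole;
};

/* `last` is substituted as a member name, so one body yields both an
 * address-ascending and an address-descending walker. */
#define DECLARE_NEXT_HOLE(name, last)                                   \
static struct node *name(struct node *node, unsigned long size)        \
{                                                                       \
        while (node->last && node->last->max_hole >= size)              \
                node = node->last;                                      \
        return node;                                                    \
}

DECLARE_NEXT_HOLE(next_hole_high_addr, rb_right)
DECLARE_NEXT_HOLE(next_hole_low_addr, rb_left)

int main(void)
{
        struct node leaf = { NULL, NULL, 64 };
        struct node root = { &leaf, NULL, 64 };

        return (next_hole_low_addr(&root, 32) == &leaf &&
                next_hole_high_addr(&root, 32) == &root) ? 0 : 1;
}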
drm_vblank.c
184 ktime_t t_vblank, u32 last)
190 vblank->last = last;
232 * to the last vblank occurred.
277 * update the last read value so we can deal with wraparound on the next
316 diff = (cur_vblank - vblank->last) & max_vblank_count;
359 diff, cur_vblank, vblank->last);
362 drm_WARN_ON_ONCE(dev, cur_vblank != vblank->last);
447 * Disable vblank irq's on crtc, make sure that last vblank count
1291 u64 last; local
182 store_vblank(struct drm_device *dev, unsigned int pipe, u32 vblank_count_inc, ktime_t t_vblank, u32 last) argument
[all...]
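
The drm_vblank.c hits center on one idiom: line 316 masks the unsigned difference between the current and last hardware counter values, so the computed diff stays correct across counter wraparound. A worked standalone example, assuming a hypothetical 24-bit frame counter:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t max_vblank_count = 0xffffff; /* 2^24 - 1 mask */
        uint32_t last = 0xfffffe;       /* stored just before wraparound */
        uint32_t cur_vblank = 0x000003; /* hardware wrapped past zero */

        /* Unsigned subtraction plus the mask recovers the true number
         * of vblanks elapsed, provided fewer than 2^24 were missed. */
        uint32_t diff = (cur_vblank - last) & max_vblank_count;

        printf("diff = %u\n", diff); /* 5 */
        assert(diff == 5);
        return 0;
}

This is also why the comment at line 277 insists on updating the stored last value on every read: the mask only disambiguates a single wrap.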
/linux-master/drivers/gpu/drm/exynos/
exynos_drm_g2d.c
158 u32 last; /* last data offset */ member in struct:g2d_cmdlist
369 lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
660 /* Compute the position of the last byte that the engine accesses. */
673 DRM_DEV_ERROR(g2d->dev, "last engine access position [%lu] "
699 reg_pos = cmdlist->last - 2 * (i + 1);
1038 index = cmdlist->last - 2 * (i + 1);
1192 cmdlist->last = 0;
1201 cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
1202 cmdlist->data[cmdlist->last
[all...]
exynos_drm_plane.c
31 * 0 last
41 static int exynos_plane_get_size(int start, unsigned length, unsigned last) argument
48 size = min_t(unsigned, end, last);
49 } else if (start <= last) {
50 size = min_t(unsigned, last - start, length);
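
exynos_plane_get_size() clips a span of `length` pixels starting at a possibly negative `start` against a screen edge at `last`. A self-contained sketch of that clipping logic, reconstructed from the lines above with a plain min helper standing in for the kernel's min_t(); the `start <= 0` branch is an assumption filled in from context:

#include <assert.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

/* How many of the span's pixels land on screen; a negative start means
 * the plane hangs off the top/left edge. */
static int plane_get_size(int start, unsigned int length, unsigned int last)
{
        int end = start + length;
        int size = 0;

        if (start <= 0) {
                if (end > 0)
                        size = min_u(end, last);
        } else if ((unsigned int)start <= last) {
                size = min_u(last - start, length);
        }

        return size;
}

int main(void)
{
        assert(plane_get_size(-10, 100, 1920) == 90);   /* clipped at start */
        assert(plane_get_size(1900, 100, 1920) == 20);  /* clipped at end */
        assert(plane_get_size(100, 100, 1920) == 100);  /* fully visible */
        return 0;
}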
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_execbuffer.c
173 * to fit the execbuf once last time before concluding that it simply will not
354 * On the last pass though, we want to try as hard
701 struct list_head last; local
706 INIT_LIST_HEAD(&last);
727 list_add(&ev->bind_link, &last);
729 list_add_tail(&ev->bind_link, &last);
732 list_splice_tail(&last, &eb->unbound);
973 * Execbuffer code expects last vma entry to be NULL,
1861 /* as last step, parse the command buffer */
1975 * the last chil
[all...]
i915_gem_shmem.c
37 struct folio *last = NULL; local
46 if (folio == last)
48 last = folio;
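
The i915_gem_shmem.c loop skips page-array entries that still resolve to the folio it just handled, a cheap way to process each large folio once. The same run-skipping shape on plain data, with a hypothetical visit step:

#include <stdio.h>
#include <stddef.h>

/* Remember the last value handled and skip entries that match it, as
 * the shmem loop does for pages belonging to one folio. */
static void visit_runs(const int *vals, size_t n)
{
        const int *last = NULL;

        for (size_t i = 0; i < n; i++) {
                if (last && vals[i] == *last)
                        continue;       /* still inside the previous run */
                last = &vals[i];
                printf("%d\n", *last);  /* the "visit" step */
        }
}

int main(void)
{
        int vals[] = { 1, 1, 1, 2, 2, 3 };

        visit_runs(vals, sizeof(vals) / sizeof(vals[0])); /* 1 2 3 */
        return 0;
}

Note this only deduplicates consecutive repeats, which is exactly what a folio's pages are: adjacent entries in the array.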
/linux-master/drivers/gpu/drm/i915/gt/
intel_context_types.h
106 * upon completion/cancellation of the last request.
157 u32 last; member in struct:intel_context::intel_context_stats::__anon679
267 * @last_rq: last request submitted on a parallel context, used
intel_execlists_submission.c
318 * prio >= max(0, last);
832 * Sentinels are supposed to be the last request so they flush
1279 struct i915_request *last, * const *active; local
1290 * where it got up to last time, and through RING_TAIL we tell the CS
1309 * If the queue is higher priority than the last
1318 while ((last = *active) && completed(last))
1321 if (last) {
1322 if (need_preempt(engine, last)) {
1324 "preempting last
2049 post_process_csb(struct i915_request **port, struct i915_request **last) argument
4090 struct i915_request *rq, *last; local
[all...]
intel_lrc.c
857 regs[CTX_TIMESTAMP] = ce->stats.runtime.last;
1957 old = stats->runtime.last;
1958 stats->runtime.last = lrc_get_runtime(ce);
1959 dt = stats->runtime.last - old;
1964 CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
1965 old, stats->runtime.last, dt);
intel_rps.c
80 ktime_t dt, last, timestamp; local
90 last = engine->stats.rps;
93 busy = ktime_to_ns(ktime_sub(dt, last));
99 last = rps->pm_timestamp;
106 dt = ktime_sub(timestamp, last);
1837 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
selftest_execlists.c
2563 * preemption of the last request. It should then complete before
2966 * request higher priority than the last. Once we are ready, we release
2967 * the last batch which then precolates down the chain, each releasing
3003 * Such that the last spinner is the highest priority and
4192 struct i915_request *last = NULL; local
4234 i915_request_put(last);
4235 last = i915_request_get(rq);
4261 if (i915_request_wait(last, 0, HZ / 5) < 0) {
4286 i915_request_put(last);
selftest_hangcheck.c
584 struct i915_request *last = NULL; local
612 if (last)
613 i915_request_put(last);
619 if (last)
620 i915_request_put(last);
621 last = i915_request_get(rq);
631 i915_request_put(last);
641 i915_request_put(last);
647 if (last) {
648 if (i915_request_wait(last,
[all...]
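
selftest_hangcheck.c, like selftest_execlists.c above it, keeps a reference to just the most recent request: put the previous holder, get the new one, then wait on `last` at the end. A sketch of that ownership dance with a toy refcount (hypothetical req type; i915's real lifetime rules live in i915_request.c):

#include <assert.h>
#include <stdlib.h>

struct req {
        int refcount;
};

static struct req *req_get(struct req *rq)
{
        rq->refcount++;
        return rq;
}

static void req_put(struct req *rq)
{
        if (--rq->refcount == 0)
                free(rq);
}

int main(void)
{
        struct req *last = NULL;

        for (int i = 0; i < 3; i++) {
                struct req *rq = calloc(1, sizeof(*rq));

                rq->refcount = 1;       /* creation reference */
                if (last)
                        req_put(last);  /* drop the previous holder */
                last = req_get(rq);     /* keep only the newest */
                req_put(rq);            /* creation ref handed off on
                                         * submit in the real driver */
        }

        assert(last && last->refcount == 1);
        req_put(last);                  /* the final wait-and-put */
        return 0;
}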
/linux-master/drivers/gpu/drm/i915/gt/uc/
intel_guc.c
166 unsigned long last = guc->last_dead_guc_jiffies; local
167 unsigned long delta = jiffies_to_msecs(jiffies - last);
intel_guc_submission.c
808 struct i915_request *last)
811 request_to_scheduling_context(last);
918 * We expect the front end (execbuf IOCTL) to set this flag on the last
930 struct i915_request *last = NULL; local
939 last = guc->stalled_request;
958 if (last && !can_merge_rq(rq, last))
966 last = rq;
990 struct intel_context *ce = request_to_scheduling_context(last);
998 guc->stalled_request = last;
807 can_merge_rq(struct i915_request *rq, struct i915_request *last) argument
[all...]
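
The guc_submission hits trace the tasklet's dequeue rule: keep taking requests while each merges with the previous one, and record a stalled request when one will not. A generic sketch of that accumulate-while-mergeable shape, with a hypothetical can_merge() that groups by tens:

#include <stdio.h>

/* Hypothetical merge rule: requests merge while they share a context. */
static int can_merge(int rq, int last)
{
        return rq / 10 == last / 10;
}

int main(void)
{
        int queue[] = { 11, 12, 13, 21, 22, 31 };
        int have_last = 0, last = 0;

        for (int i = 0; i < 6; i++) {
                int rq = queue[i];

                if (have_last && !can_merge(rq, last)) {
                        /* Flush what we batched; the GuC path would
                         * stash the culprit as guc->stalled_request on
                         * back-pressure. */
                        printf("submit batch ending at %d\n", last);
                }
                last = rq;
                have_last = 1;
        }
        if (have_last)
                printf("submit batch ending at %d\n", last);
        return 0;
}

Output: three batches, ending at 13, 22, and 31.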
selftest_guc.c
54 struct i915_request *last[3] = {NULL, NULL, NULL}, *rq; local
95 last[i] = rq;
99 ret = i915_request_wait(last[i], 0, HZ);
104 i915_request_put(last[i]);
105 last[i] = NULL;
124 if (last[i])
125 i915_request_put(last[i]);
139 * another request which should successfully steal a guc_id. Wait on last
153 struct i915_request *spin_rq = NULL, *rq, *last = NULL; local
207 if ((ret != -EAGAIN) || !last) {
[all...]
/linux-master/drivers/gpu/drm/i915/
i915_active.c
101 if (!atomic_read(&ref->count)) /* after the last dec */
241 * at all. We can reuse the last slot if it is empty, that is
861 struct llist_node *first = NULL, *last = NULL; local
915 if (!last)
916 last = first;
921 llist_add_batch(first, last, &ref->preallocated_barriers);
1015 * __i915_active_fence_set: Update the last active fence along its timeline
1019 * Records the new @fence as the last active fence along its timeline in
i915_vma_resource.c
301 u64 last = offset + size - 1; local
306 i915_vma_resource_color_adjust_range(vm, &offset, &last);
307 node = vma_res_itree_iter_first(&vm->pending_unbind, offset, last);
314 node = vma_res_itree_iter_next(node, offset, last);
387 u64 last = offset + size - 1; local
393 i915_vma_resource_color_adjust_range(vm, &offset, &last);
394 node = vma_res_itree_iter_first(&vm->pending_unbind, offset, last);
407 node = vma_res_itree_iter_next(node, offset, last);
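
Both i915_vma_resource.c hits use the iter_first()/iter_next() pair that the kernel's INTERVAL_TREE_DEFINE() template generates. A sketch of that loop shape, backed by a plain sorted array rather than an rbtree (hypothetical range type and iterators):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct range {
        uint64_t start, last;   /* inclusive, matching the kernel */
};

static const struct range *iter_first(const struct range *v, size_t n,
                                      uint64_t start, uint64_t last)
{
        for (size_t i = 0; i < n; i++)
                if (v[i].start <= last && start <= v[i].last)
                        return &v[i];
        return NULL;
}

static const struct range *iter_next(const struct range *v, size_t n,
                                     const struct range *cur,
                                     uint64_t start, uint64_t last)
{
        for (const struct range *p = cur + 1; p < v + n; p++)
                if (p->start <= last && start <= p->last)
                        return p;
        return NULL;
}

int main(void)
{
        const struct range pending[] = {
                { 0x0000, 0x0fff }, { 0x2000, 0x2fff }, { 0x5000, 0x5fff },
        };
        uint64_t offset = 0x1000, size = 0x2000;
        uint64_t last = offset + size - 1;      /* 0x2fff */

        /* Same shape as the pending-unbind walks in the hits above. */
        for (const struct range *node = iter_first(pending, 3, offset, last);
             node;
             node = iter_next(pending, 3, node, offset, last))
                printf("overlap: [%#llx, %#llx]\n",
                       (unsigned long long)node->start,
                       (unsigned long long)node->last);
        return 0;
}

The real tree iterators prune by a cached maximum `last` per subtree; the contract (first match, then successive matches) is the same.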
i915_vma_resource.h
245 u64 last,
251 u64 last,
intel_memory_region.c
92 static resource_size_t random_page(resource_size_t last) argument
95 return get_random_u32_below(last >> PAGE_SHIFT) << PAGE_SHIFT;
102 resource_size_t last, page; local
108 last = resource_size(&mem->io) - PAGE_SIZE;
116 * the first and last of the specified region to confirm the backing
122 for (page = 0; page <= last; page += PAGE_SIZE) {
132 err = iopagetest(mem, last, caller);
136 err = iopagetest(mem, random_page(last), caller);
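
random_page() draws a random page index below the index of `last` and shifts it back into a byte offset, so the probe is always page-aligned and never the final page itself; that is why line 132 tests offset `last` explicitly before line 136 tests a random one. A standalone sketch with rand() standing in for get_random_u32_below():

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1u << PAGE_SHIFT)

static uint64_t random_page(uint64_t last)
{
        /* index in [0, last / PAGE_SIZE), then back to a byte offset */
        return ((uint64_t)rand() % (last >> PAGE_SHIFT)) << PAGE_SHIFT;
}

int main(void)
{
        uint64_t last = (1u << 20) - PAGE_SIZE; /* last page of 1 MiB */

        for (int i = 0; i < 1000; i++) {
                uint64_t page = random_page(last);

                assert(page < last);                    /* never the final page */
                assert((page & (PAGE_SIZE - 1)) == 0);  /* page-aligned */
        }
        return 0;
}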
/linux-master/drivers/gpu/drm/i915/selftests/
i915_gem_evict.c
451 struct i915_request *last = NULL; local
489 if (last)
490 i915_request_put(last);
491 last = i915_request_get(rq);
499 if (last) {
500 if (i915_request_wait(last, 0, HZ) < 0) {
502 i915_request_put(last);
503 pr_err("Failed waiting for last request (on %s)",
507 i915_request_put(last);
i915_gem_gtt.c
158 u64 size, last, limit; local
212 for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
215 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
226 last, size - last);
1282 u64 hole_start, hole_end, last = 0; local
1290 if (hole_start < last)
1304 last = hole_end;
i915_syncmap.c
32 unsigned int last,
42 if (last & BIT(depth - d - 1))
80 last << 1 | ((p->bitmap >> (i + 1)) ? 1 : 0),
486 pr_err("Parent (join) of last leaf was not the sync!\n");
585 pr_err("context=%llu, last=%u this=%u did not match expectation (%d)\n",
29 __sync_print(struct i915_syncmap *p, char *buf, unsigned long *sz, unsigned int depth, unsigned int last, unsigned int idx) argument
scatterlist.c
208 struct page *last,
211 return first + npages == last;
207 page_contiguous(struct page *first, struct page *last, unsigned long npages) argument
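
page_contiguous() in the scatterlist selftest leans on struct page pointers being elements of one big array: stepping npages from the first page must land exactly on the page following the run. The same pointer arithmetic on an ordinary array (hypothetical stand-in for struct page):

#include <assert.h>
#include <stddef.h>

struct page { char pad[64]; };  /* stand-in for the kernel's struct page */

/* True iff exactly `npages` consecutive array slots separate first from
 * last, i.e. the run [first, last) is contiguous. */
static int page_contiguous(const struct page *first,
                           const struct page *last,
                           size_t npages)
{
        return first + npages == last;
}

int main(void)
{
        struct page pages[8];

        assert(page_contiguous(&pages[2], &pages[5], 3));
        assert(!page_contiguous(&pages[2], &pages[6], 3));
        return 0;
}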
