Lines Matching refs:last

318 *      prio >= max(0, last);
835 * Sentinels are supposed to be the last request so they flush
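
The two comment fragments above (file lines 318 and 835) carry the ordering rules: preemption is only worth triggering when the queued priority beats the last request already handed to the hardware, and a sentinel has to remain the last request so everything behind it gets flushed. A minimal, self-contained sketch of that priority rule, matching the need_preempt(engine, last) call further down; the strict comparison and the queue_priority_hint field are assumptions read off the hint=%d trace strings, not quoted from these two fragments:

    #include <stdbool.h>

    /* Simplified stand-ins for the real i915 structures. */
    struct request { int prio; };
    struct engine  { int queue_priority_hint; };

    /*
     * "prio >= max(0, last)" is the natural rule; comparing strictly
     * avoids preempting a request of equal priority (an assumption here,
     * intended to preserve FIFO order between peers).
     */
    static bool need_preempt(const struct engine *engine,
                             const struct request *last)
    {
            int last_prio = last->prio > 0 ? last->prio : 0; /* max(0, last) */

            return engine->queue_priority_hint > last_prio;
    }
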
1282 struct i915_request *last, * const *active;
1293 * where it got up to last time, and through RING_TAIL we tell the CS
1312 * If the queue is higher priority than the last
1321 while ((last = *active) && completed(last))
1324 if (last) {
1325 if (need_preempt(engine, last)) {
1327 "preempting last=%llx:%lld, prio=%d, hint=%d\n",
1328 last->fence.context,
1329 last->fence.seqno,
1330 last->sched.attr.priority,
1350 last = NULL;
1351 } else if (timeslice_expired(engine, last)) {
1353 "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
1355 last->fence.context, last->fence.seqno,
1356 rq_prio(last),
1358 str_yes_no(timeslice_yield(execlists, last)));
1393 last = NULL;
1433 if (last && !can_merge_rq(last, rq)) {
1476 last = rq;
1512 if (last && !can_merge_rq(last, rq)) {
1515 * combine this request with the last, then we
1526 if (last->context == rq->context)
1529 if (i915_request_has_sentinel(last))
1548 if (ctx_single_port_submission(last->context) ||
1557 *port++ = i915_request_get(last);
1558 last = NULL;
1561 GEM_BUG_ON(last &&
1562 !can_merge_ctx(last->context,
1564 GEM_BUG_ON(last &&
1565 i915_seqno_passed(last->fence.seqno,
1569 last = rq;
1577 *port++ = i915_request_get(last);
1588 * the priority of the lowest executing request, i.e. last.
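
The block of matches from file lines 1282 through 1588 appears to be the dequeue loop: *last* tracks the newest request picked for the ELSP port being built, and a request that cannot be merged with it either closes that port or ends the dequeue. A rough sketch of that shape, reconstructed from the fragments above; can_merge_rq(), i915_request_get() and the port cursor are names from the listing, last_port and for_each_queued_request() are hypothetical stand-ins for the surrounding bookkeeping and the real priority-queue walk, and the sentinel and single-port special cases (1529, 1548) are omitted:

    struct i915_request *last = NULL;

    for_each_queued_request(engine, rq) {      /* stand-in iterator */
            if (last && !can_merge_rq(last, rq)) {
                    /* The next request cannot join the open port... */
                    if (port == last_port)
                            break;             /* ...and no ports are left */

                    /* Close the open port with its final request. */
                    *port++ = i915_request_get(last);
            }
            last = rq;                         /* newest request for this port */
    }

    /* Whatever is still open becomes the final port entry. */
    if (last)
            *port++ = i915_request_get(last);

Each port ends up holding the last request of its context, since the hardware runs that context up to the recorded tail and so covers the merged requests before it.
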
1763 * we prefer the path that is self-checking and as a last resort,
1880 /* Remember who was last running under the timer */
2053 struct i915_request **last)
2055 while (port != last)
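
File lines 2053-2055 look like the matching teardown: walk the array of submitted requests up to, but not including, *last*, releasing each one. Filled out as a small sketch; only the parameter and the loop come from the listing, while the function name and the execlists_schedule_out() call used to drop each reference are assumptions:

    static void release_ports(struct i915_request **port,
                              struct i915_request **last)
    {
            /* Drop every request submitted before *last* was reached. */
            while (port != last)
                    execlists_schedule_out(*port++);
    }
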
2832 * fake the HW write to point back to the last entry so that our
2833 * inline comparison of our cached head position against the last HW
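
The comment at file lines 2832-2833 concerns CSB (context status buffer) bookkeeping after a reset: no interrupt has arrived yet, so the driver itself stores the last-entry index into its mirror of the HW write pointer, keeping the usual head-versus-last-write comparison valid. A sketch of that fake write, assuming csb_head, csb_write and csb_size fields on the execlists state (none of which appear in the listing):

    /* Wind the cached read pointer back to the final CSB slot... */
    const unsigned int reset_value = execlists->csb_size - 1;

    execlists->csb_head = reset_value;
    /* ...and fake the HW write so head == last-write until a real event. */
    WRITE_ONCE(*execlists->csb_write, reset_value);
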
4098 struct i915_request *rq, *last;
4105 last = NULL;
4111 last = rq;
4113 if (last) {
4119 show_request(m, last, "\t\t", 0);
4126 last = NULL;
4135 last = rq;
4138 if (last) {
4144 show_request(m, last, "\t\t", 0);
4147 last = NULL;
4158 last = rq;
4161 if (last) {
4167 show_request(m, last, "\t\t", 0);
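
The final group, file lines 4098-4167, repeats one debug-dump idiom three times: print requests up to a limit, but always stash the final one in *last* and print it after noting how many entries were skipped, so the tail of each queue is never missing from the dump. A sketch of a single pass of that idiom; show_request() and the "\t\t" indent are from the listing, while the list head, max and the drm_printf() skip message are assumptions:

    last = NULL;
    count = 0;
    list_for_each_entry(rq, &requests, sched.link) {   /* assumed list head */
            if (count++ < max - 1)
                    show_request(m, rq, "\t\t", 0);
            else
                    last = rq;      /* defer the tail so it is always shown */
    }
    if (last) {
            if (count > max)
                    drm_printf(m, "\t\t...skipping %d executing requests...\n",
                               count - max);
            show_request(m, last, "\t\t", 0);
    }
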