Lines Matching refs:rq

158 	struct drm_sched_rq *rq = entity->rq;
161 rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
169 * Both locks need to be grabbed, one to protect from entity->rq change
174 spin_lock(&entity->rq->lock);
180 rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
183 spin_unlock(&entity->rq->lock);
191 * @rq: scheduler run queue
196 struct drm_sched_rq *rq)
198 spin_lock_init(&rq->lock);
199 INIT_LIST_HEAD(&rq->entities);
200 rq->rb_tree_root = RB_ROOT_CACHED;
201 rq->current_entity = NULL;
202 rq->sched = sched;
208 * @rq: scheduler run queue
213 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
219 spin_lock(&rq->lock);
221 atomic_inc(rq->sched->score);
222 list_add_tail(&entity->list, &rq->entities);
224 spin_unlock(&rq->lock);
230 * @rq: scheduler run queue
235 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
241 spin_lock(&rq->lock);
243 atomic_dec(rq->sched->score);
246 if (rq->current_entity == entity)
247 rq->current_entity = NULL;
252 spin_unlock(&rq->lock);
259 * @rq: scheduler run queue to check.
269 struct drm_sched_rq *rq)
273 spin_lock(&rq->lock);
275 entity = rq->current_entity;
277 list_for_each_entry_continue(entity, &rq->entities, list) {
283 spin_unlock(&rq->lock);
287 rq->current_entity = entity;
289 spin_unlock(&rq->lock);
295 list_for_each_entry(entity, &rq->entities, list) {
301 spin_unlock(&rq->lock);
305 rq->current_entity = entity;
307 spin_unlock(&rq->lock);
311 if (entity == rq->current_entity)
315 spin_unlock(&rq->lock);
324 * @rq: scheduler run queue to check.
334 struct drm_sched_rq *rq)
338 spin_lock(&rq->lock);
339 for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
348 spin_unlock(&rq->lock);
352 rq->current_entity = entity;
357 spin_unlock(&rq->lock);
795 if (!entity->rq) {
800 drm_err(job->sched, "%s: entity has no rq!\n", __func__);
843 sched = entity->rq->sched;
1345 struct drm_sched_rq *rq = sched->sched_rq[i];
1347 spin_lock(&rq->lock);
1348 list_for_each_entry(s_entity, &rq->entities, list)
1351 * it will be removed from rq in drm_sched_entity_fini
1355 spin_unlock(&rq->lock);
1397 struct drm_sched_rq *rq = sched->sched_rq[i];
1399 spin_lock(&rq->lock);
1400 list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
1408 spin_unlock(&rq->lock);
1409 if (&entity->list != &rq->entities)