Lines Matching refs:ctx

85 void spu_set_timeslice(struct spu_context *ctx)
87 if (ctx->prio < NORMAL_PRIO)
88 ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
90 ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
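
These matches appear to come from the SPU context scheduler in the Linux kernel's spufs code. Lines 85-90 derive a context's time slice from its priority: the two SCALE_PRIO() calls scale a default slice by how important the context is, with a fourfold boost when the priority is better (numerically lower) than NORMAL_PRIO. A minimal user-space sketch of that scaling follows; the constants and the scale_prio()/set_timeslice() names are invented for illustration and do not match the kernel's values.

#include <stdio.h>

/* Placeholder constants for illustration only; the kernel's values differ. */
#define MAX_PRIO       140
#define MAX_USER_PRIO  40
#define NORMAL_PRIO    120
#define DEF_TIMESLICE  100   /* scheduler ticks */
#define MIN_TIMESLICE  1

/* Scale a base slice by how far prio sits below MAX_PRIO, never below the floor. */
static unsigned int scale_prio(unsigned int base, int prio)
{
        unsigned int slice = base * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2);
        return slice > MIN_TIMESLICE ? slice : MIN_TIMESLICE;
}

/* Model of spu_set_timeslice(): boosted slice for better-than-normal priorities. */
static unsigned int set_timeslice(int prio)
{
        if (prio < NORMAL_PRIO)
                return scale_prio(DEF_TIMESLICE * 4, prio);
        return scale_prio(DEF_TIMESLICE, prio);
}

int main(void)
{
        for (int prio = 100; prio <= 139; prio += 13)
                printf("prio %3d -> slice %u\n", prio, set_timeslice(prio));
        return 0;
}

A lower numeric priority yields a larger (MAX_PRIO - prio) factor and hence a longer slice; the floor keeps the slice from rounding down to zero for the least important contexts.
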
96 void __spu_update_sched_info(struct spu_context *ctx)
102 BUG_ON(!list_empty(&ctx->rq));
109 ctx->tid = current->pid;
118 ctx->prio = current->prio;
120 ctx->prio = current->static_prio;
121 ctx->policy = current->policy;
131 cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);
134 ctx->last_ran = raw_smp_processor_id();
137 void spu_update_sched_info(struct spu_context *ctx)
141 if (ctx->state == SPU_STATE_RUNNABLE) {
142 node = ctx->spu->node;
148 __spu_update_sched_info(ctx);
151 __spu_update_sched_info(ctx);
155 static int __node_allowed(struct spu_context *ctx, int node)
160 if (cpumask_intersects(mask, &ctx->cpus_allowed))
167 static int node_allowed(struct spu_context *ctx, int node)
172 rval = __node_allowed(ctx, node);
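
__node_allowed() at line 160 approves a node when that node's CPU mask intersects the cpus_allowed copy taken in __spu_update_sched_info(); node_allowed() just wraps the same test with locking. A stand-alone model of the check, with cpus_of_node() and the node layout invented for the example:

#include <stdio.h>
#include <stdbool.h>

#define MAX_NODES 4

/* Stand-in for the node-to-CPU mapping; the layout is invented for the example. */
static unsigned long cpus_of_node(int node)
{
        /* 4 CPUs per node: node 0 -> bits 0-3, node 1 -> bits 4-7, ... */
        return 0xfUL << (node * 4);
}

/* Model of __node_allowed(): a node is usable if any of its CPUs is in the
 * context's allowed mask (cf. the cpumask_intersects() call at line 160). */
static bool node_allowed(unsigned long ctx_cpus_allowed, int node)
{
        return (cpus_of_node(node) & ctx_cpus_allowed) != 0;
}

int main(void)
{
        unsigned long allowed = (1UL << 2) | (1UL << 9);   /* CPUs 2 and 9 */

        for (int node = 0; node < MAX_NODES; node++)
                printf("node %d: %s\n", node,
                       node_allowed(allowed, node) ? "allowed" : "not allowed");
        return 0;
}

In the kernel the test is expressed with cpumask_of_node() and cpumask_intersects(); the sketch reduces both to plain bit masks.
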
191 struct spu_context *ctx = spu->ctx;
193 &ctx->sched_flags);
195 wake_up_all(&ctx->stop_wq);
205 * @ctx: context to bind
207 static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
209 spu_context_trace(spu_bind_context__enter, ctx, spu);
211 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
213 if (ctx->flags & SPU_CREATE_NOSCHED)
216 ctx->stats.slb_flt_base = spu->stats.slb_flt;
217 ctx->stats.class2_intr_base = spu->stats.class2_intr;
219 spu_associate_mm(spu, ctx->owner);
222 spu->ctx = ctx;
224 ctx->spu = spu;
225 ctx->ops = &spu_hw_ops;
234 spu_unmap_mappings(ctx);
236 spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
237 spu_restore(&ctx->csa, spu);
239 ctx->state = SPU_STATE_RUNNABLE;
241 spuctx_switch_state(ctx, SPU_UTIL_USER);
251 return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
256 struct spu_context *ctx;
258 list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
259 if (list_empty(&ctx->aff_list))
260 list_add(&ctx->aff_list, &gang->aff_list_head);
267 struct spu_context *ctx;
271 list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
273 if (&ctx->aff_list == &gang->aff_list_head)
275 ctx->aff_offset = offset--;
279 list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
280 if (&ctx->aff_list == &gang->aff_list_head)
282 ctx->aff_offset = offset++;
288 static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
312 if (!node_allowed(ctx, node))
318 if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
319 && spu->ctx->gang->aff_ref_spu)
320 available_spus -= spu->ctx->gang->contexts;
323 if (available_spus < ctx->gang->contexts) {
343 struct spu_context *tmp, *ctx;
352 list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
354 if (&ctx->aff_list == &gang->aff_list_head)
356 lowest_offset = ctx->aff_offset;
393 static int has_affinity(struct spu_context *ctx)
395 struct spu_gang *gang = ctx->gang;
397 if (list_empty(&ctx->aff_list))
400 if (atomic_read(&ctx->gang->aff_sched_count) == 0)
401 ctx->gang->aff_ref_spu = NULL;
417 * @ctx: context to unbind
419 static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
423 spu_context_trace(spu_unbind_context__enter, ctx, spu);
425 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
427 if (spu->ctx->flags & SPU_CREATE_NOSCHED)
430 if (ctx->gang)
432 * If ctx->gang->aff_sched_count is positive, SPU affinity is
436 atomic_dec_if_positive(&ctx->gang->aff_sched_count);
438 spu_unmap_mappings(ctx);
439 spu_save(&ctx->csa, spu);
440 spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
444 ctx->state = SPU_STATE_SAVED;
451 ctx->ops = &spu_backing_ops;
453 spu->ctx = NULL;
458 ctx->stats.slb_flt +=
459 (spu->stats.slb_flt - ctx->stats.slb_flt_base);
460 ctx->stats.class2_intr +=
461 (spu->stats.class2_intr - ctx->stats.class2_intr_base);
464 spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
465 ctx->spu = NULL;
467 if (spu_stopped(ctx, &status))
468 wake_up_all(&ctx->stop_wq);
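
spu_bind_context() (lines 216-217) records the SPU's fault and interrupt counters as a per-context baseline, and spu_unbind_context() (lines 458-461) credits the difference back to the context when it is switched out, so each context accumulates only the events taken while it was actually loaded. A small model of that baseline-and-delta pattern; the struct layouts are invented, the counter names follow the matches:

#include <stdio.h>

/* Hardware-side counters that only ever grow (invented struct). */
struct spu_counters {
        unsigned long slb_flt;
        unsigned long class2_intr;
};

/* Per-context accounting: totals plus the baseline captured at bind time. */
struct ctx_stats {
        unsigned long slb_flt, slb_flt_base;
        unsigned long class2_intr, class2_intr_base;
};

/* On bind: remember where the hardware counters stood. */
static void bind_stats(struct ctx_stats *c, const struct spu_counters *s)
{
        c->slb_flt_base = s->slb_flt;
        c->class2_intr_base = s->class2_intr;
}

/* On unbind: credit the context with whatever accrued while it was loaded. */
static void unbind_stats(struct ctx_stats *c, const struct spu_counters *s)
{
        c->slb_flt += s->slb_flt - c->slb_flt_base;
        c->class2_intr += s->class2_intr - c->class2_intr_base;
}

int main(void)
{
        struct spu_counters spu = { 100, 7 };
        struct ctx_stats ctx = { 0 };

        bind_stats(&ctx, &spu);
        spu.slb_flt += 12;        /* faults taken while this context ran */
        spu.class2_intr += 3;
        unbind_stats(&ctx, &spu);

        printf("slb_flt=%lu class2_intr=%lu\n", ctx.slb_flt, ctx.class2_intr);
        return 0;
}
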
473 * @ctx: context to add
475 static void __spu_add_to_rq(struct spu_context *ctx)
490 if (list_empty(&ctx->rq)) {
491 list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
492 set_bit(ctx->prio, spu_prio->bitmap);
498 static void spu_add_to_rq(struct spu_context *ctx)
501 __spu_add_to_rq(ctx);
505 static void __spu_del_from_rq(struct spu_context *ctx)
507 int prio = ctx->prio;
509 if (!list_empty(&ctx->rq)) {
512 list_del_init(&ctx->rq);
519 void spu_del_from_rq(struct spu_context *ctx)
522 __spu_del_from_rq(ctx);
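
__spu_add_to_rq() and __spu_del_from_rq() (lines 490-512) keep one runqueue list per priority plus a bitmap of the priorities that currently have waiters, so the scheduler never has to scan empty lists. Below is a self-contained toy version of that structure; the list helpers imitate the kernel's list_head API and NR_PRIO is an arbitrary stand-in for MAX_PRIO:

#include <stdio.h>
#include <stdbool.h>

#define NR_PRIO 8     /* toy priority range */

/* Minimal circular doubly-linked list, in the spirit of the kernel's list_head. */
struct list_node {
        struct list_node *prev, *next;
};

static void list_init(struct list_node *n) { n->prev = n->next = n; }
static bool list_empty(const struct list_node *n) { return n->next == n; }

static void list_add_tail(struct list_node *n, struct list_node *head)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

static void list_del_init(struct list_node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        list_init(n);
}

/* Toy context: just a runqueue link and a priority. */
struct ctx {
        struct list_node rq;
        int prio;
};

/* One list per priority plus a bitmap of non-empty priorities,
 * mirroring spu_prio->runq[] and spu_prio->bitmap in the matches. */
static struct list_node runq[NR_PRIO];
static unsigned long bitmap;

static void add_to_rq(struct ctx *c)
{
        if (list_empty(&c->rq)) {
                list_add_tail(&c->rq, &runq[c->prio]);
                bitmap |= 1UL << c->prio;
        }
}

static void del_from_rq(struct ctx *c)
{
        if (!list_empty(&c->rq)) {
                list_del_init(&c->rq);
                if (list_empty(&runq[c->prio]))
                        bitmap &= ~(1UL << c->prio);
        }
}

int main(void)
{
        struct ctx a = { .prio = 3 }, b = { .prio = 5 };

        for (int i = 0; i < NR_PRIO; i++)
                list_init(&runq[i]);
        list_init(&a.rq);
        list_init(&b.rq);

        add_to_rq(&a);
        add_to_rq(&b);
        printf("bitmap after add: 0x%lx\n", bitmap);   /* 0x28: prios 3 and 5 queued */
        del_from_rq(&a);
        printf("bitmap after del: 0x%lx\n", bitmap);   /* 0x20: only prio 5 left */
        return 0;
}

The bitmap is what makes grab_runnable_context() cheap; its scan is sketched further down.
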
526 static void spu_prio_wait(struct spu_context *ctx)
535 BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));
538 prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
540 __spu_add_to_rq(ctx);
542 mutex_unlock(&ctx->state_mutex);
544 mutex_lock(&ctx->state_mutex);
546 __spu_del_from_rq(ctx);
550 remove_wait_queue(&ctx->stop_wq, &wait);
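
spu_prio_wait() (lines 526-550) is only reached by SPU_CREATE_NOSCHED contexts (the BUG_ON at line 535): the waiter registers on ctx->stop_wq, queues itself, drops ctx->state_mutex around the actual sleep, then re-takes the mutex before dequeueing. Below is a user-space analogue of that drop-the-lock-while-sleeping pattern using a condition variable; this is a loose analogue, not the kernel's wait-queue mechanism, and every name in it is invented:

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

/* pthread_cond_wait() drops and re-acquires the mutex atomically; the kernel
 * gets the same effect by setting the task state before unlocking. */
static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t stop_wq = PTHREAD_COND_INITIALIZER;
static bool spu_granted;

static void *waiter(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&state_mutex);
        /* "__spu_add_to_rq(ctx)" would go here. */
        while (!spu_granted)
                pthread_cond_wait(&stop_wq, &state_mutex);  /* unlock, sleep, relock */
        /* "__spu_del_from_rq(ctx)" would go here. */
        pthread_mutex_unlock(&state_mutex);
        puts("waiter: got an SPU");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);

        pthread_mutex_lock(&state_mutex);
        spu_granted = true;                 /* an SPU became available */
        pthread_cond_broadcast(&stop_wq);   /* cf. wake_up_all(&ctx->stop_wq) */
        pthread_mutex_unlock(&state_mutex);

        pthread_join(t, NULL);
        return 0;
}
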
553 static struct spu *spu_get_idle(struct spu_context *ctx)
558 spu_context_nospu_trace(spu_get_idle__enter, ctx);
560 if (ctx->gang) {
561 mutex_lock(&ctx->gang->aff_mutex);
562 if (has_affinity(ctx)) {
563 aff_ref_spu = ctx->gang->aff_ref_spu;
564 atomic_inc(&ctx->gang->aff_sched_count);
565 mutex_unlock(&ctx->gang->aff_mutex);
569 spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
574 atomic_dec(&ctx->gang->aff_sched_count);
577 mutex_unlock(&ctx->gang->aff_mutex);
582 if (!node_allowed(ctx, node))
594 spu_context_nospu_trace(spu_get_idle__not_found, ctx);
600 spu_context_trace(spu_get_idle__found, ctx, spu);
607 * @ctx: candidate context for running
611 static struct spu *find_victim(struct spu_context *ctx)
617 spu_context_nospu_trace(spu_find_victim__enter, ctx);
630 if (!node_allowed(ctx, node))
635 struct spu_context *tmp = spu->ctx;
637 if (tmp && tmp->prio > ctx->prio &&
640 victim = spu->ctx;
649 * This nests ctx->state_mutex, but we always lock
665 if (!spu || victim->prio <= ctx->prio) {
677 spu_context_trace(__spu_deactivate__unload, ctx, spu);
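
find_victim() walks the loaded contexts on each allowed node and, per the comparison at line 637, picks the least important one (numerically largest prio) that is still less important than the caller; line 665 backs off if the chosen victim turned out to be at least as important after all, or has already lost its SPU. A compact model of just the selection step, with the array standing in for the per-node SPU list:

#include <stdio.h>
#include <stddef.h>

/* Toy loaded context: a higher numeric prio means less important. */
struct ctx {
        const char *name;
        int prio;
};

/* Pick the least important loaded context that is still less important
 * than the caller, mirroring the comparison at line 637. */
static struct ctx *find_victim(struct ctx **loaded, size_t n, int caller_prio)
{
        struct ctx *victim = NULL;

        for (size_t i = 0; i < n; i++) {
                struct ctx *tmp = loaded[i];

                if (tmp && tmp->prio > caller_prio &&
                    (!victim || tmp->prio > victim->prio))
                        victim = tmp;
        }
        return victim;
}

int main(void)
{
        struct ctx a = { "a", 110 }, b = { "b", 130 }, c = { "c", 120 };
        struct ctx *loaded[] = { &a, &b, &c };

        struct ctx *v = find_victim(loaded, 3, 115);
        printf("victim: %s\n", v ? v->name : "none");   /* "b": least important, prio 130 */
        return 0;
}
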
699 static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
704 spu_set_timeslice(ctx);
707 if (spu->ctx == NULL) {
708 spu_bind_context(spu, ctx);
716 wake_up_all(&ctx->run_wq);
718 spu_add_to_rq(ctx);
721 static void spu_schedule(struct spu *spu, struct spu_context *ctx)
725 mutex_lock(&ctx->state_mutex);
726 if (ctx->state == SPU_STATE_SAVED)
727 __spu_schedule(spu, ctx);
728 spu_release(ctx);
734 * @ctx: The context currently scheduled on the SPU
737 * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
742 * Should be called with ctx->state_mutex held.
744 static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
753 spu_unbind_context(spu, ctx);
754 ctx->stats.invol_ctx_switch++;
761 * @ctx: spu context to schedule
764 * Tries to find a free spu to run @ctx. If no free spu is available
768 int spu_activate(struct spu_context *ctx, unsigned long flags)
778 if (ctx->spu)
785 spu = spu_get_idle(ctx);
790 if (!spu && rt_prio(ctx->prio))
791 spu = find_victim(ctx);
795 runcntl = ctx->ops->runcntl_read(ctx);
796 __spu_schedule(spu, ctx);
798 spuctx_switch_state(ctx, SPU_UTIL_USER);
803 if (ctx->flags & SPU_CREATE_NOSCHED) {
804 spu_prio_wait(ctx);
808 spu_add_to_rq(ctx);
821 struct spu_context *ctx;
829 list_for_each_entry(ctx, rq, rq) {
831 if (__node_allowed(ctx, node)) {
832 __spu_del_from_rq(ctx);
838 ctx = NULL;
841 return ctx;
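
grab_runnable_context() (lines 821-841) scans the runqueue bitmap from the best priority upward, stopping before the caller-supplied bound, and dequeues the first queued context that is allowed on the target node. The scan itself reduces to a find-first-set over the bitmap, sketched here standalone; the per-priority list walk and the node check from lines 829-832 are omitted:

#include <stdio.h>

/* find_first_bit() stand-in: lowest set bit index, or 'limit' if none below it. */
static int first_set_bit(unsigned long bitmap, int limit)
{
        for (int i = 0; i < limit; i++)
                if (bitmap & (1UL << i))
                        return i;
        return limit;
}

/* Model of the scan in grab_runnable_context(): return the best queued
 * priority strictly better than 'prio', or -1 if nothing qualifies. */
static int best_runnable_prio(unsigned long bitmap, int prio)
{
        int best = first_set_bit(bitmap, prio);
        return best < prio ? best : -1;
}

int main(void)
{
        unsigned long bitmap = (1UL << 5) | (1UL << 9);  /* waiters at prio 5 and 9 */

        printf("bound 7 -> candidate at prio %d\n", best_runnable_prio(bitmap, 7));  /* 5 */
        printf("bound 4 -> candidate at prio %d\n", best_runnable_prio(bitmap, 4));  /* -1 */
        return 0;
}
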
844 static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
846 struct spu *spu = ctx->spu;
852 spu_unschedule(spu, ctx, new == NULL);
857 spu_release(ctx);
861 mutex_lock(&ctx->state_mutex);
872 * @ctx: spu context to unbind
874 * Unbind @ctx from the physical spu it is running on and schedule
877 void spu_deactivate(struct spu_context *ctx)
879 spu_context_nospu_trace(spu_deactivate__enter, ctx);
880 __spu_deactivate(ctx, 1, MAX_PRIO);
885 * @ctx: spu context to yield
888 * unbind @ctx from the physical spu and schedule the highest
891 void spu_yield(struct spu_context *ctx)
893 spu_context_nospu_trace(spu_yield__enter, ctx);
894 if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
895 mutex_lock(&ctx->state_mutex);
896 __spu_deactivate(ctx, 0, MAX_PRIO);
897 mutex_unlock(&ctx->state_mutex);
901 static noinline void spusched_tick(struct spu_context *ctx)
906 if (spu_acquire(ctx))
909 if (ctx->state != SPU_STATE_RUNNABLE)
911 if (ctx->flags & SPU_CREATE_NOSCHED)
913 if (ctx->policy == SCHED_FIFO)
916 if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
919 spu = ctx->spu;
921 spu_context_trace(spusched_tick__preempt, ctx, spu);
923 new = grab_runnable_context(ctx->prio + 1, spu->node);
925 spu_unschedule(spu, ctx, 0);
926 if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
927 spu_add_to_rq(ctx);
929 spu_context_nospu_trace(spusched_tick__newslice, ctx);
930 if (!ctx->time_slice)
931 ctx->time_slice++;
934 spu_release(ctx);
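
spusched_tick() leaves NOSCHED and SCHED_FIFO contexts alone (lines 911-913), decrements the time slice of everything else, and on expiry asks grab_runnable_context(ctx->prio + 1, ...) whether an at-least-as-important context is waiting; if so the running context is unscheduled and, if it still wants to run, requeued, otherwise its slice is merely topped back up to a single tick (lines 929-931). A condensed model of that decision; the struct, the bitmap argument and the helper are stand-ins:

#include <stdio.h>
#include <stdbool.h>

/* Toy running context. */
struct ctx {
        int prio;
        unsigned int time_slice;
        bool wants_to_run;       /* stand-in for SPU_SCHED_SPU_RUN */
};

/* Stand-in for grab_runnable_context(prio, node): is anything queued
 * at a priority strictly better than 'prio'? */
static bool waiter_better_than(unsigned long runq_bitmap, int prio)
{
        for (int i = 0; i < prio; i++)
                if (runq_bitmap & (1UL << i))
                        return true;
        return false;
}

/* One scheduler tick, modelled on spusched_tick(): returns true if the
 * running context gets preempted. */
static bool sched_tick(struct ctx *c, unsigned long runq_bitmap)
{
        if (--c->time_slice && c->wants_to_run)
                return false;                /* slice not used up yet */

        /* prio + 1: an equally important waiter is enough to preempt. */
        if (waiter_better_than(runq_bitmap, c->prio + 1))
                return true;                 /* unschedule; requeue if it wants to run */

        if (!c->time_slice)
                c->time_slice++;             /* no better waiter: one more tick (cf. 930-931) */
        return false;
}

int main(void)
{
        struct ctx c = { .prio = 6, .time_slice = 2, .wants_to_run = true };
        unsigned long runq = 1UL << 6;       /* an equal-priority context is waiting */

        for (int t = 1; t <= 2; t++)
                printf("tick %d: %s\n", t,
                       sched_tick(&c, runq) ? "preempted" : "keeps running");
        return 0;
}
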
1002 struct spu_context *ctx = spu->ctx;
1004 if (ctx) {
1005 get_spu_context(ctx);
1007 spusched_tick(ctx);
1009 put_spu_context(ctx);
1019 void spuctx_switch_state(struct spu_context *ctx,
1029 delta = curtime - ctx->stats.tstamp;
1031 WARN_ON(!mutex_is_locked(&ctx->state_mutex));
1034 spu = ctx->spu;
1035 old_state = ctx->stats.util_state;
1036 ctx->stats.util_state = new_state;
1037 ctx->stats.tstamp = curtime;
1043 ctx->stats.times[old_state] += delta;
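
spuctx_switch_state() does per-state time accounting: the time since the last transition (line 1029) is credited to the state being left (line 1043), then the state and timestamp are updated (lines 1036-1037). A minimal user-space model of that bookkeeping; the state names loosely follow the SPU_UTIL_* values seen in the matches and the clock is ordinary CLOCK_MONOTONIC:

#include <stdio.h>
#include <time.h>

/* Utilization states, loosely after SPU_UTIL_* in the matches. */
enum util_state { UTIL_USER, UTIL_SYSTEM, UTIL_IDLE_LOADED, UTIL_MAX };

struct ctx_stats {
        enum util_state util_state;
        long long tstamp_ns;
        long long times_ns[UTIL_MAX];
};

static long long now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Credit the time since the last transition to the state we are leaving,
 * then switch to the new state, as spuctx_switch_state() does. */
static void switch_state(struct ctx_stats *s, enum util_state new_state)
{
        long long curtime = now_ns();
        long long delta = curtime - s->tstamp_ns;

        s->times_ns[s->util_state] += delta;
        s->util_state = new_state;
        s->tstamp_ns = curtime;
}

int main(void)
{
        struct ctx_stats s = { .util_state = UTIL_IDLE_LOADED, .tstamp_ns = now_ns() };

        switch_state(&s, UTIL_SYSTEM);   /* context-switch overhead starts */
        switch_state(&s, UTIL_USER);     /* user code starts running */
        switch_state(&s, UTIL_IDLE_LOADED);

        printf("idle=%lldns system=%lldns user=%lldns\n",
               s.times_ns[UTIL_IDLE_LOADED], s.times_ns[UTIL_SYSTEM],
               s.times_ns[UTIL_USER]);
        return 0;
}
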