/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
#define CREATE_TRACE_POINTS
#include "sputrace.h"

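/*
 * The runqueue is organised like the O(1) CPU scheduler's: one list per
 * priority level plus a bitmap of the non-empty levels, so finding the
 * best waiting context is a find_first_bit() followed by taking the
 * head of that level's list (see grab_runnable_context() below).
 */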
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger the timeslice
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
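
/*
 * Worked example, assuming HZ=1000 (so one spu scheduler tick is 10 CPU
 * ticks = 10ms and DEF_SPU_TIMESLICE is 10 spu ticks):
 *
 *   nice   0, prio 120: SCALE_PRIO(10, 120) = 10 * 20 / 20 = 10 ticks (100ms)
 *   nice -20, prio 100: SCALE_PRIO(40, 100) = 40 * 40 / 20 = 80 ticks (800ms)
 *   nice +19, prio 139: SCALE_PRIO(10, 139) = 10 *  1 / 20 = 0, clamped to
 *                       MIN_SPU_TIMESLICE = 1 tick (10ms)
 */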

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * TO DO: the context may be loaded, so we may need to activate
	 * it again on a different node. But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue. The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
	ctx->cpus_allowed = current->cpus_allowed;

	/* Save the current cpu id for spu interrupt routing. */
	ctx->last_ran = raw_smp_processor_id();
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		const struct cpumask *mask = cpumask_of_node(node);

		if (cpumask_intersects(mask, &ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_bind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu_associate_mm(spu, ctx->owner);

	spin_lock_irq(&spu->register_lock);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spin_unlock_irq(&spu->register_lock);

	spu_unmap_mappings(ctx);

	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_USER);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}
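
/*
 * Example: in a gang whose affinity list holds ctxA, ctxB (the reference
 * context), ctxC and ctxD in that order, the two loops above assign
 * aff_offset -1 to ctxA, 0 to ctxB and +1/+2 to ctxC/ctxD.  The offsets
 * are positions relative to the reference context and are turned into
 * distances from the reference spu by ctx_location() below.
 */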

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		 int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		/*
		 * "available_spus" counts how many spus are not potentially
		 * going to be used by other affinity gangs whose reference
		 * context is already in place. Although this code seeks to
		 * avoid having affinity gangs with a summed amount of
		 * contexts bigger than the amount of spus in the node,
		 * this may happen sporadically. In this case, available_spus
		 * becomes negative, which is harmless.
		 */
		int available_spus;

		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		available_spus = 0;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
					&& spu->ctx->gang->aff_ref_spu)
				available_spus -= spu->ctx->gang->contexts;
			available_spus++;
		}
		if (available_spus < ctx->gang->contexts) {
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			continue;
		}

		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}

static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns whether the gang has a reference spu for affinity placement,
 * computing and caching one in gang->aff_ref_spu first if necessary.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (atomic_read(&ctx->gang->aff_sched_count) == 0)
		ctx->gang->aff_ref_spu = NULL;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}
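
/*
 * The reference spu stays valid for as long as some context of the gang
 * is scheduled with affinity: spu_get_idle() bumps aff_sched_count when
 * it places a context relative to the reference, spu_unbind_context()
 * drops it again, and once the count reaches zero has_affinity() above
 * clears aff_ref_spu so that the next activation picks a fresh location.
 */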

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	u32 status;

	spu_context_trace(spu_unbind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang)
		/*
		 * If ctx->gang->aff_sched_count is positive, SPU affinity is
		 * being considered in this gang. Using atomic_dec_if_positive
		 * allows us to skip an explicit check for affinity in this
		 * gang.
		 */
		atomic_dec_if_positive(&ctx->gang->aff_sched_count);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);

	spin_lock_irq(&spu->register_lock);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;
	spin_unlock_irq(&spu->register_lock);

	spu_associate_mm(spu, NULL);

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;

	if (spu_stopped(ctx, &status))
		wake_up_all(&ctx->stop_wq);
}

/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx:       context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * The caller must explicitly wait for a context to be loaded
	 * if the nosched flag is set.  If NOSCHED is not set, the caller
	 * queues the context and waits for an spu event or error.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	spu_context_nospu_trace(spu_get_idle__enter, ctx);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			atomic_dec(&ctx->gang->aff_sched_count);
			goto not_found;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

 not_found:
	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	spu_context_trace(spu_get_idle__found, ctx, spu);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	spu_context_nospu_trace(spu_find_victim__enter, ctx);

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio)) {
				victim = spu->ctx;
			}
		}
		if (victim)
			get_spu_context(victim);
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			if (!mutex_trylock(&victim->state_mutex)) {
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu_context_trace(__spu_deactivate__unload, ctx, spu);

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
				spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);
			put_spu_context(victim);

			return spu;
		}
	}

	return NULL;
}

static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}

static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/* not a candidate for interruptible because it's called either
	   from the scheduler thread or from spu_deactivate */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state == SPU_STATE_SAVED)
		__spu_schedule(spu, ctx);
	spu_release(ctx);
}

/**
 * spu_unschedule - remove a context from a spu, and possibly release it.
 * @spu:	The SPU to unschedule from
 * @ctx:	The context currently scheduled on the SPU
 * @free_spu:	Whether to free the SPU for other contexts
 *
 * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
 * SPU is made available for other contexts (ie, may be returned by
 * spu_get_idle). If this is zero, the caller is expected to schedule another
 * context to this spu.
 *
 * Should be called with ctx->state_mutex held.
 */
static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
		int free_spu)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	if (free_spu)
		spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}

/**
 * grab_runnable_context - try to find a runnable context
 * @prio:	only consider contexts with a priority better than this
 * @node:	node the context must be allowed to run on
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx, new == NULL);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/* this one can't easily be made
					   interruptible */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_deactivate__enter, ctx);
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield -	yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_yield__enter, ctx);
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;

	if (spu_acquire(ctx))
		BUG();	/* a kernel thread never has signals pending */

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		goto out;

	spu = ctx->spu;

	spu_context_trace(spusched_tick__preempt, ctx, spu);

	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx, 0);
		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
			spu_add_to_rq(ctx);
	} else {
		spu_context_nospu_trace(spusched_tick__newslice, ctx);
		if (!ctx->time_slice)
			ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */

	active_tasks = count_active_contexts() * FIXED_1;
	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
}
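
/*
 * CALC_LOAD maintains the usual exponentially damped average in 11-bit
 * (FSHIFT) fixed point:
 * load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT.
 * For example, starting from an idle system (spu_avenrun[0] == 0) with
 * one active context, a single update with EXP_1 == 1884 and
 * FIXED_1 == 2048 yields 1 * (2048 - 1884) = 164 in fixed point, which
 * LOAD_INT()/LOAD_FRAC() below display as "0.08".
 */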

static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}

static void spuloadavg_wake(unsigned long data)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}

static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

				if (ctx) {
					get_spu_context(ctx);
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
					put_spu_context(ctx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}

void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct timespec ts;
	struct spu *spu;
	enum spu_utilization_state old_state;
	int node;

	ktime_get_ts(&ts);
	curtime = timespec_to_ns(&ts);
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
		node = spu->node;
		if (old_state == SPU_UTIL_USER)
			atomic_dec(&cbe_spu_info[node].busy_spus);
		if (new_state == SPU_UTIL_USER)
			atomic_inc(&cbe_spu_info[node].busy_spus);
	}
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}
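
/*
 * A sample /proc/spu_loadavg line (illustrative values only):
 *
 *	0.08 0.02 0.01 1/3 1534
 *
 * i.e. the 1/5/15 minute load averages, active vs. total spu contexts,
 * and the last pid allocated in the reader's pid namespace.
 */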

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);
	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops);
	if (!entry)
		goto out_stop_kthread;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}