• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/arch/sparc/kernel/
1/*  linux/arch/sparc/kernel/process.c
2 *
3 *  Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net)
4 *  Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
5 */
6
7/*
8 * This file handles the architecture-dependent parts of process handling..
9 */
10
11#include <stdarg.h>
12
13#include <linux/errno.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/mm.h>
18#include <linux/stddef.h>
19#include <linux/ptrace.h>
20#include <linux/user.h>
21#include <linux/smp.h>
22#include <linux/reboot.h>
23#include <linux/delay.h>
24#include <linux/pm.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27
28#include <asm/auxio.h>
29#include <asm/oplib.h>
30#include <asm/uaccess.h>
31#include <asm/system.h>
32#include <asm/page.h>
33#include <asm/pgalloc.h>
34#include <asm/pgtable.h>
35#include <asm/delay.h>
36#include <asm/processor.h>
37#include <asm/psr.h>
38#include <asm/elf.h>
39#include <asm/prom.h>
40#include <asm/unistd.h>
41
/*
 * Power management idle function
 * Set in pm platform drivers (apc.c and pmc.c)
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

/*
 * Power-off handler instantiation for pm.h compliance
 * This is done via auxio, but could be used as a fallback
 * handler when auxio is not present-- unused for now...
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);

/*
 * sysctl - toggle power-off restriction for serial console
 * systems in machine_power_off().  Defaults to 1 (power-off
 * allowed even on serial-console machines).
 */
int scons_pwroff = 1;

/* Low-level FPU state saver; defined elsewhere (assembly). */
extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);

/* UP: which task currently owns the lazily-switched FPU state
 * (see exit_thread()/flush_thread()/copy_thread() below). */
struct task_struct *last_task_used_math = NULL;

/* NOTE(review): appears to hold the thread_info of the task running
 * on each cpu -- set up elsewhere; confirm against trampoline code. */
struct thread_info *current_set[NR_CPUS];
67
68#ifndef CONFIG_SMP
69
70#define SUN4C_FAULT_HIGH 100
71
/*
 * The idle loop on a Sparc... ;)
 *
 * UP variant.  Spins until a reschedule is needed, using the platform
 * pm_idle hook when one is installed.  On sun4c it also samples the
 * kernel fault rate and grows the sun4c kernel MMU ring when faults
 * exceed SUN4C_FAULT_HIGH per second.
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	for (;;) {
		if (ARCH_SUN4C) {
			/* Sampling state persists across loop iterations. */
			static int count = HZ;
			static unsigned long last_jiffies;
			static unsigned long last_faults;
			static unsigned long fps;
			unsigned long now;
			unsigned long faults;

			extern unsigned long sun4c_kernel_faults;
			extern void sun4c_grow_kernel_ring(void);

			/* irq-off so jiffies and the fault counter are
			 * sampled consistently. */
			local_irq_disable();
			now = jiffies;
			count -= (now - last_jiffies);
			last_jiffies = now;
			if (count < 0) {
				/* Roughly once per second: fold the new
				 * fault count into a decaying average of
				 * faults-per-second. */
				count += HZ;
				faults = sun4c_kernel_faults;
				fps = (fps + (faults - last_faults)) >> 1;
				last_faults = faults;
				if (fps >= SUN4C_FAULT_HIGH) {
					sun4c_grow_kernel_ring();
				}
			}
			local_irq_enable();
		}

		/* Idle until a reschedule is pending, then let the
		 * scheduler run; preemption is toggled around schedule()
		 * as the idle task requires. */
		if (pm_idle) {
			while (!need_resched())
				(*pm_idle)();
		} else {
			while (!need_resched())
				cpu_relax();
		}
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
		check_pgt_cache();
	}
}
119
120#else
121
122/* This is being executed in task 0 'user space'. */
123void cpu_idle(void)
124{
125        set_thread_flag(TIF_POLLING_NRFLAG);
126	/* endless idle loop with no priority at all */
127	while(1) {
128		while (!need_resched())
129			cpu_relax();
130		preempt_enable_no_resched();
131		schedule();
132		preempt_disable();
133		check_pgt_cache();
134	}
135}
136
137#endif
138
/*
 * Halt the machine by dropping into the PROM monitor.
 * prom_halt() is not expected to return; panic if it somehow does.
 */
void machine_halt(void)
{
	/* Briefly run with irqs on before halting -- NOTE(review):
	 * presumably to let pending interrupt work (e.g. console
	 * output) complete; the 8ms figure looks like a historical
	 * fudge factor. */
	local_irq_enable();
	mdelay(8);
	local_irq_disable();
	prom_halt();
	panic("Halt failed!");
}
147
148void machine_restart(char * cmd)
149{
150	char *p;
151
152	local_irq_enable();
153	mdelay(8);
154	local_irq_disable();
155
156	p = strchr (reboot_command, '\n');
157	if (p) *p = 0;
158	if (cmd)
159		prom_reboot(cmd);
160	if (*reboot_command)
161		prom_reboot(reboot_command);
162	prom_feval ("reset");
163	panic("Reboot failed!");
164}
165
166void machine_power_off(void)
167{
168	if (auxio_power_register &&
169	    (strcmp(of_console_device->type, "serial") || scons_pwroff))
170		*auxio_power_register |= AUXIO_POWER_OFF;
171	machine_halt();
172}
173
174
175void show_regs(struct pt_regs *r)
176{
177	struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14];
178
179        printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx    %s\n",
180	       r->psr, r->pc, r->npc, r->y, print_tainted());
181	printk("PC: <%pS>\n", (void *) r->pc);
182	printk("%%G: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
183	       r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
184	       r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
185	printk("%%O: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
186	       r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
187	       r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
188	printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
189
190	printk("%%L: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
191	       rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
192	       rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
193	printk("%%I: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
194	       rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
195	       rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
196}
197
198/*
199 * The show_stack is an external API which we do not use ourselves.
200 * The oops is printed in die_if_kernel.
201 */
202void show_stack(struct task_struct *tsk, unsigned long *_ksp)
203{
204	unsigned long pc, fp;
205	unsigned long task_base;
206	struct reg_window32 *rw;
207	int count = 0;
208
209	if (tsk != NULL)
210		task_base = (unsigned long) task_stack_page(tsk);
211	else
212		task_base = (unsigned long) current_thread_info();
213
214	fp = (unsigned long) _ksp;
215	do {
216		/* Bogus frame pointer? */
217		if (fp < (task_base + sizeof(struct thread_info)) ||
218		    fp >= (task_base + (PAGE_SIZE << 1)))
219			break;
220		rw = (struct reg_window32 *) fp;
221		pc = rw->ins[7];
222		printk("[%08lx : ", pc);
223		printk("%pS ] ", (void *) pc);
224		fp = rw->ins[6];
225	} while (++count < 16);
226	printk("\n");
227}
228
229void dump_stack(void)
230{
231	unsigned long *ksp;
232
233	__asm__ __volatile__("mov	%%fp, %0"
234			     : "=r" (ksp));
235	show_stack(current, ksp);
236}
237
238EXPORT_SYMBOL(dump_stack);
239
240/*
241 * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
242 */
243unsigned long thread_saved_pc(struct task_struct *tsk)
244{
245	return task_thread_info(tsk)->kpc;
246}
247
/*
 * Free current thread data structures etc..
 *
 * If this task owns the FPU state (tracked via last_task_used_math
 * on UP, TIF_USEDFPU on SMP), flush the live FPU registers into the
 * thread struct and drop ownership.
 */
void exit_thread(void)
{
#ifndef CONFIG_SMP
	if(last_task_used_math == current) {
#else
	if (test_thread_flag(TIF_USEDFPU)) {
#endif
		/* Keep process from leaving FPU in a bogon state. */
		put_psr(get_psr() | PSR_EF);	/* enable FPU so fpsave works */
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
#ifndef CONFIG_SMP
		last_task_used_math = NULL;
#else
		clear_thread_flag(TIF_USEDFPU);
#endif
	}
}
269
/*
 * Reset the current task's thread state for a new program image.
 * Drops saved user register windows, flushes and disowns any FPU
 * state (same UP/SMP ownership scheme as exit_thread()), and turns
 * a kernel thread back into an ordinary user task.
 */
void flush_thread(void)
{
	current_thread_info()->w_saved = 0;

#ifndef CONFIG_SMP
	if(last_task_used_math == current) {
#else
	if (test_thread_flag(TIF_USEDFPU)) {
#endif
		/* Clean the fpu. */
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
#ifndef CONFIG_SMP
		last_task_used_math = NULL;
#else
		clear_thread_flag(TIF_USEDFPU);
#endif
	}

	/* Now, this task is no longer a kernel thread. */
	current->thread.current_ds = USER_DS;
	if (current->thread.flags & SPARC_FLAG_KTHREAD) {
		current->thread.flags &= ~SPARC_FLAG_KTHREAD;

		/* We must fixup kregs as well. */
		current->thread.kregs = (struct pt_regs *)
		    (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ));
	}
}
300
301static inline struct sparc_stackf __user *
302clone_stackframe(struct sparc_stackf __user *dst,
303		 struct sparc_stackf __user *src)
304{
305	unsigned long size, fp;
306	struct sparc_stackf *tmp;
307	struct sparc_stackf __user *sp;
308
309	if (get_user(tmp, &src->fp))
310		return NULL;
311
312	fp = (unsigned long) tmp;
313	size = (fp - ((unsigned long) src));
314	fp = (unsigned long) dst;
315	sp = (struct sparc_stackf __user *)(fp - size);
316
317	/* do_fork() grabs the parent semaphore, we must release it
318	 * temporarily so we can build the child clone stack frame
319	 * without deadlocking.
320	 */
321	if (__copy_user(sp, src, size))
322		sp = NULL;
323	else if (put_user(fp, &sp->fp))
324		sp = NULL;
325
326	return sp;
327}
328
329asmlinkage int sparc_do_fork(unsigned long clone_flags,
330                             unsigned long stack_start,
331                             struct pt_regs *regs,
332                             unsigned long stack_size)
333{
334	unsigned long parent_tid_ptr, child_tid_ptr;
335	unsigned long orig_i1 = regs->u_regs[UREG_I1];
336	long ret;
337
338	parent_tid_ptr = regs->u_regs[UREG_I2];
339	child_tid_ptr = regs->u_regs[UREG_I4];
340
341	ret = do_fork(clone_flags, stack_start,
342		      regs, stack_size,
343		      (int __user *) parent_tid_ptr,
344		      (int __user *) child_tid_ptr);
345
346	/* If we get an error and potentially restart the system
347	 * call, we're screwed because copy_thread() clobbered
348	 * the parent's %o1.  So detect that case and restore it
349	 * here.
350	 */
351	if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
352		regs->u_regs[UREG_I1] = orig_i1;
353
354	return ret;
355}
356
357extern void ret_from_fork(void);
358
/*
 * Set up the kernel and user-visible state of a freshly forked task.
 *
 * Builds the child's kernel stack so that switching to it resumes in
 * ret_from_fork, copies the parent's trap frame into the child's
 * pt_regs, and arranges the fork return values (child sees its own
 * path via %o1 == 1 below; parent's %o1 is cleared).  The signature
 * is fixed by the generic fork code; 'unused' really is unused here.
 * Returns 0, or -EFAULT if a clone() user-stack copy faults.
 */
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs;
	char *new_stack;

	/* If we own the live FPU state, flush it into the thread
	 * struct first (UP tracks the owner in last_task_used_math,
	 * SMP in TIF_USEDFPU). */
#ifndef CONFIG_SMP
	if(last_task_used_math == current) {
#else
	if (test_thread_flag(TIF_USEDFPU)) {
#endif
		put_psr(get_psr() | PSR_EF);
		fpsave(&p->thread.float_regs[0], &p->thread.fsr,
		       &p->thread.fpqueue[0], &p->thread.fpqdepth);
#ifdef CONFIG_SMP
		clear_thread_flag(TIF_USEDFPU);
#endif
	}

	/*
	 * Layout of the child's kernel stack, built top-down:
	 *
	 *  p->thread_info         new_stack   childregs
	 *  !                      !           !             {if(PSR_PS) }
	 *  V                      V (stk.fr.) V  (pt_regs)  { (stk.fr.) }
	 *  +----- - - - - - ------+===========+============={+==========}+
	 */
	new_stack = task_stack_page(p) + THREAD_SIZE;
	if (regs->psr & PSR_PS)
		new_stack -= STACKFRAME_SZ;	/* extra frame for a kernel thread */
	new_stack -= STACKFRAME_SZ + TRACEREG_SZ;
	memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ);
	childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ);

	/*
	 * A new process must start with interrupts closed in 2.5,
	 * because this is how Mingo's scheduler works (see schedule_tail
	 * and finish_arch_switch). If we do not do it, a timer interrupt hits
	 * before we unlock, attempts to re-take the rq->lock, and then we die.
	 * Thus, kpsr|=PSR_PIL.
	 */
	ti->ksp = (unsigned long) new_stack;
	ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
	ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
	ti->kwim = current->thread.fork_kwim;

	if(regs->psr & PSR_PS) {
		/* Parent trapped from kernel mode: the child is a
		 * kernel thread.  Point kregs at fake user regs and
		 * copy the caller's kernel stack frame. */
		extern struct pt_regs fake_swapper_regs;

		p->thread.kregs = &fake_swapper_regs;
		new_stack += STACKFRAME_SZ + TRACEREG_SZ;
		childregs->u_regs[UREG_FP] = (unsigned long) new_stack;
		p->thread.flags |= SPARC_FLAG_KTHREAD;
		p->thread.current_ds = KERNEL_DS;
		memcpy(new_stack, (void *)regs->u_regs[UREG_FP], STACKFRAME_SZ);
		childregs->u_regs[UREG_G6] = (unsigned long) ti;
	} else {
		/* Ordinary user fork/clone. */
		p->thread.kregs = childregs;
		childregs->u_regs[UREG_FP] = sp;
		p->thread.flags &= ~SPARC_FLAG_KTHREAD;
		p->thread.current_ds = USER_DS;

		if (sp != regs->u_regs[UREG_FP]) {
			struct sparc_stackf __user *childstack;
			struct sparc_stackf __user *parentstack;

			/*
			 * This is a clone() call with supplied user stack.
			 * Set some valid stack frames to give to the child.
			 */
			childstack = (struct sparc_stackf __user *)
				(sp & ~0xfUL);
			parentstack = (struct sparc_stackf __user *)
				regs->u_regs[UREG_FP];

			childstack = clone_stackframe(childstack, parentstack);
			if (!childstack)
				return -EFAULT;

			childregs->u_regs[UREG_FP] = (unsigned long)childstack;
		}
	}

#ifdef CONFIG_SMP
	/* FPU must be disabled on SMP. */
	childregs->psr &= ~PSR_EF;
#endif

	/* Set the return value for the child. */
	childregs->u_regs[UREG_I0] = current->pid;
	childregs->u_regs[UREG_I1] = 1;

	/* Set the return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		childregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

	return 0;
}
461
/*
 * fill in the fpu structure for a core dump.
 * Always returns 1 (the "fpregs present" flag for the ELF core dumper).
 */
int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
	if (used_math()) {
		/* NOTE(review): when used_math() is set this returns a
		 * zeroed dump -- matches the stock 2.6 sparc32 code,
		 * but the polarity looks surprising; confirm against
		 * upstream before changing. */
		memset(fpregs, 0, sizeof(*fpregs));
		fpregs->pr_q_entrysize = 8;
		return 1;
	}
	/* If we own the live FPU state, flush it into current->thread
	 * (and drop ownership) so the copy below sees current values. */
#ifdef CONFIG_SMP
	if (test_thread_flag(TIF_USEDFPU)) {
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
		if (regs != NULL) {
			regs->psr &= ~(PSR_EF);
			clear_thread_flag(TIF_USEDFPU);
		}
	}
#else
	if (current == last_task_used_math) {
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
		if (regs != NULL) {
			regs->psr &= ~(PSR_EF);
			last_task_used_math = NULL;
		}
	}
#endif
	/* Copy the 32 FP registers, status register and any pending
	 * FP queue entries out of the thread struct. */
	memcpy(&fpregs->pr_fr.pr_regs[0],
	       &current->thread.float_regs[0],
	       (sizeof(unsigned long) * 32));
	fpregs->pr_fsr = current->thread.fsr;
	fpregs->pr_qcnt = current->thread.fpqdepth;
	fpregs->pr_q_entrysize = 8;
	fpregs->pr_en = 1;
	if(fpregs->pr_qcnt != 0) {
		memcpy(&fpregs->pr_q[0],
		       &current->thread.fpqueue[0],
		       sizeof(struct fpq) * fpregs->pr_qcnt);
	}
	/* Zero out the rest. */
	memset(&fpregs->pr_q[fpregs->pr_qcnt], 0,
	       sizeof(struct fpq) * (32 - fpregs->pr_qcnt));
	return 1;
}
510
511/*
512 * sparc_execve() executes a new program after the asm stub has set
513 * things up for us.  This should basically do what I want it to.
514 */
515asmlinkage int sparc_execve(struct pt_regs *regs)
516{
517	int error, base = 0;
518	char *filename;
519
520	/* Check for indirect call. */
521	if(regs->u_regs[UREG_G1] == 0)
522		base = 1;
523
524	filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
525	error = PTR_ERR(filename);
526	if(IS_ERR(filename))
527		goto out;
528	error = do_execve(filename,
529			  (const char __user *const  __user *)
530			  regs->u_regs[base + UREG_I1],
531			  (const char __user *const  __user *)
532			  regs->u_regs[base + UREG_I2],
533			  regs);
534	putname(filename);
535out:
536	return error;
537}
538
/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
 * who haven't done an "execve()") should use this: it will work within
 * a system call from a "real" process, but the process memory space will
 * not be freed until both the parent and the child have exited.
 *
 * Implemented as hand-written inline asm: it issues the clone() trap
 * directly, and in the child (detected by %o1 != 0, see copy_thread()
 * setting %o1 = 1 for the child) calls fn(arg) and then exits via the
 * exit() trap.  The parent falls through and returns the clone result.
 */
pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	long retval;

	__asm__ __volatile__("mov %4, %%g2\n\t"    /* Set aside fn ptr... */
			     "mov %5, %%g3\n\t"    /* and arg. */
			     "mov %1, %%g1\n\t"
			     "mov %2, %%o0\n\t"    /* Clone flags. */
			     "mov 0, %%o1\n\t"     /* usp arg == 0 */
			     "t 0x10\n\t"          /* Linux/Sparc clone(). */
			     "cmp %%o1, 0\n\t"
			     "be 1f\n\t"           /* The parent, just return. */
			     " nop\n\t"            /* Delay slot. */
			     "jmpl %%g2, %%o7\n\t" /* Call the function. */
			     " mov %%g3, %%o0\n\t" /* Get back the arg in delay. */
			     "mov %3, %%g1\n\t"
			     "t 0x10\n\t"          /* Linux/Sparc exit(). */
			     /* Notreached by child. */
			     "1: mov %%o0, %0\n\t" :
			     "=r" (retval) :
			     "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
			     "i" (__NR_exit),  "r" (fn), "r" (arg) :
			     "g1", "g2", "g3", "o0", "o1", "memory", "cc");
	return retval;
}
EXPORT_SYMBOL(kernel_thread);
573
574unsigned long get_wchan(struct task_struct *task)
575{
576	unsigned long pc, fp, bias = 0;
577	unsigned long task_base = (unsigned long) task;
578        unsigned long ret = 0;
579	struct reg_window32 *rw;
580	int count = 0;
581
582	if (!task || task == current ||
583            task->state == TASK_RUNNING)
584		goto out;
585
586	fp = task_thread_info(task)->ksp + bias;
587	do {
588		/* Bogus frame pointer? */
589		if (fp < (task_base + sizeof(struct thread_info)) ||
590		    fp >= (task_base + (2 * PAGE_SIZE)))
591			break;
592		rw = (struct reg_window32 *) fp;
593		pc = rw->ins[7];
594		if (!in_sched_functions(pc)) {
595			ret = pc;
596			goto out;
597		}
598		fp = rw->ins[6] + bias;
599	} while (++count < 16);
600
601out:
602	return ret;
603}
604