/*
 * BK Id: SCCS/s.process.c 1.34 11/23/01 16:38:29 paulus
 */
/*
 *  linux/arch/ppc/kernel/process.c
 *
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>

int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs);
extern unsigned long _get_SP(void);

struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;
struct mm_struct init_mm = INIT_MM(init_mm);
/* this is 16-byte aligned because it has a stack in it */
union task_union __attribute__((aligned(16))) init_task_union = {
	INIT_TASK(init_task_union.task)
};
/* only used to get secondary processor up */
struct task_struct *current_set[NR_CPUS] = {&init_task, };

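/*
 * Compile-time debug switches for this file; both are normally left
 * undefined.  Defining CHECK_STACK enables the kernel-stack sanity
 * checks below.
 */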
#undef SHOW_TASK_SWITCHES
#undef CHECK_STACK

#if defined(CHECK_STACK)
unsigned long
kernel_stack_top(struct task_struct *tsk)
{
	return ((unsigned long)tsk) + sizeof(union task_union);
}

unsigned long
task_top(struct task_struct *tsk)
{
	return ((unsigned long)tsk) + sizeof(struct task_struct);
}

/* check to make sure the kernel stack is healthy */
int check_stack(struct task_struct *tsk)
{
	unsigned long stack_top = kernel_stack_top(tsk);
	unsigned long tsk_top = task_top(tsk);
	int ret = 0;

	if ( !tsk )
		printk("check_stack(): bad tsk %p\n", tsk);

	/* check if the stored ksp is bad */
	if ( (tsk->thread.ksp > stack_top) || (tsk->thread.ksp < tsk_top) )
	{
		printk("stack out of bounds: %s/%d\n"
		       " tsk_top %08lx ksp %08lx stack_top %08lx\n",
		       tsk->comm, tsk->pid,
		       tsk_top, tsk->thread.ksp, stack_top);
		ret |= 2;
	}

	/* check if the stack pointer RIGHT NOW is bad */
	if ( (tsk == current) && ((_get_SP() > stack_top) || (_get_SP() < tsk_top)) )
	{
		printk("current stack ptr out of bounds: %s/%d\n"
		       " tsk_top %08lx sp %08lx stack_top %08lx\n",
		       current->comm, current->pid,
		       tsk_top, _get_SP(), stack_top);
		ret |= 4;
	}

	if (ret)
	{
		panic("bad kernel stack");
	}
	return ret;
}
#endif /* defined(CHECK_STACK) */

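/*
 * Copy the current task's AltiVec register state into an ELF core-dump
 * note.  If this thread still owns the vector unit (MSR_VEC set), force
 * the registers out to the thread_struct first.
 */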
#ifdef CONFIG_ALTIVEC
int
dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
{
	if (regs->msr & MSR_VEC)
		giveup_altivec(current);
	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
	return 1;
}

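/*
 * Make the AltiVec unit available to the kernel itself: save any live
 * user vector state first, so the kernel can clobber the vector
 * registers without corrupting a user thread.
 */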
void
enable_kernel_altivec(void)
{
#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_ALTIVEC */

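/*
 * Likewise for the FPU: save any live user floating-point state so
 * the kernel can use the FP registers safely.
 */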
void
enable_kernel_fp(void)
{
#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}

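/*
 * Copy the current task's FP register state into an ELF core-dump
 * note, forcing it out of the FPU first if this thread still owns it.
 */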
int
dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
	if (regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpregs, &current->thread.fpr[0], sizeof(*fpregs));
	return 1;
}

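/*
 * The machine-dependent half of the context switch.  Runs with
 * interrupts disabled, hands off lazy FP/AltiVec state as needed,
 * then calls the assembly routine _switch() to swap kernel stacks.
 */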
void
_switch_to(struct task_struct *prev, struct task_struct *new,
	  struct task_struct **last)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long s;

	__save_flags(s);
	__cli();
#ifdef CHECK_STACK
	check_stack(prev);
	check_stack(new);
#endif

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if ( prev->thread.regs && (prev->thread.regs->msr & MSR_FP) )
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#endif /* CONFIG_SMP */

	current_set[smp_processor_id()] = new;

	/* Avoid the trap.  On SMP this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
	new_thread = &new->thread;
	old_thread = &current->thread;
	*last = _switch(old_thread, new_thread);
	__restore_flags(s);
}

void show_regs(struct pt_regs * regs)
{
	int i;

	printk("NIP: %08lX XER: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx    %s\n",
	       regs->nip, regs->xer, regs->link, regs->gpr[1], regs, regs->trap, print_tainted());
	printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
	       regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
	       regs->msr&MSR_FP ? 1 : 0, regs->msr&MSR_ME ? 1 : 0,
	       regs->msr&MSR_IR ? 1 : 0,
	       regs->msr&MSR_DR ? 1 : 0);
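	/* DAR/DSISR are only meaningful after a data access (0x300)
	   or alignment (0x600) exception */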
	if (regs->trap == 0x300 || regs->trap == 0x600)
		printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
	printk("TASK = %p[%d] '%s' ",
	       current, current->pid, current->comm);
	printk("Last syscall: %ld ", current->thread.last_syscall);
	printk("\nlast math %p last altivec %p", last_task_used_math,
	       last_task_used_altivec);

#ifdef CONFIG_4xx
	printk("\nPLB0: bear= 0x%8.8x acr=   0x%8.8x besr=  0x%8.8x\n",
	    mfdcr(DCRN_POB0_BEAR), mfdcr(DCRN_PLB0_ACR),
	    mfdcr(DCRN_PLB0_BESR));
	printk("PLB0 to OPB: bear= 0x%8.8x besr0= 0x%8.8x besr1= 0x%8.8x\n",
	    mfdcr(DCRN_PLB0_BEAR), mfdcr(DCRN_POB0_BESR0),
	    mfdcr(DCRN_POB0_BESR1));
#endif

#ifdef CONFIG_SMP
	printk(" CPU: %d", current->processor);
#endif /* CONFIG_SMP */

	printk("\n");
	for (i = 0;  i < 32;  i++)
	{
		long r;
		if ((i % 8) == 0)
		{
			printk("GPR%02d: ", i);
		}

		if (__get_user(r, &regs->gpr[i]))
			goto out;
		printk("%08lX ", r);
		if ((i % 8) == 7)
		{
			printk("\n");
		}
	}
out:
	print_backtrace((unsigned long *)regs->gpr[1]);
}

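/*
 * Forget any lazily-held FP/AltiVec ownership when a thread exits or
 * execs, so that last_task_used_* never points at stale state.
 */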
void exit_thread(void)
{
	if (last_task_used_math == current)
		last_task_used_math = NULL;
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
}

void flush_thread(void)
{
	if (last_task_used_math == current)
		last_task_used_math = NULL;
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
}

void
release_thread(struct task_struct *t)
{
}

/*
 * Copy a thread: set up the kernel stack and register state the child
 * needs in order to be scheduled and to return from fork/clone.
 */
int
copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
	    unsigned long unused,
	    struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)p + sizeof(union task_union);
	unsigned long childframe;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for a kernel thread, set `current' and the stack pointer
		   in the new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		childregs->gpr[2] = (unsigned long) p;
		p->thread.regs = NULL;	/* no user register state */
	} else
		p->thread.regs = childregs;
	childregs->gpr[3] = 0;  /* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;
	childframe = sp;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some housekeeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	kregs->nip = (unsigned long)ret_from_fork;

	/*
	 * copy FPU info - assume lazy FPU switching is always used now
	 *  -- Cort
	 */
	if (regs->msr & MSR_FP) {
		giveup_fpu(current);
		childregs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
	}
	memcpy(&p->thread.fpr, &current->thread.fpr, sizeof(p->thread.fpr));
	p->thread.fpscr = current->thread.fpscr;

#ifdef CONFIG_ALTIVEC
	/*
	 * copy AltiVec info - assume lazy AltiVec switching
	 * - kumar
	 */
	if (regs->msr & MSR_VEC)
		giveup_altivec(current);
	memcpy(&p->thread.vr, &current->thread.vr, sizeof(p->thread.vr));
	p->thread.vscr = current->thread.vscr;
	childregs->msr &= ~MSR_VEC;
#endif /* CONFIG_ALTIVEC */

	p->thread.last_syscall = -1;

	return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
{
	set_fs(USER_DS);
	memset(regs->gpr, 0, sizeof(regs->gpr));
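	/* zero the 5 consecutive special-register fields starting at
	   ctr in pt_regs (ctr, link, xer, ccr, mq) */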
	memset(&regs->ctr, 0, 5 * sizeof(regs->ctr));
	regs->nip = nip;
	regs->gpr[1] = sp;
	regs->msr = MSR_USER;
	if (last_task_used_math == current)
		last_task_used_math = NULL;
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
	current->thread.fpscr = 0;
}

int sys_clone(int p1, int p2, int p3, int p4, int p5, int p6,
	      struct pt_regs *regs)
{
	return do_fork(p1, regs->gpr[1], regs, 0);
}

int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
	     struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0);
}

int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
	      struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5,
	       struct pt_regs *regs)
{
	int error;
	char * filename;

	filename = getname((char *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	if (regs->msr & MSR_FP)
		giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
	if (regs->msr & MSR_VEC)
		giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
	error = do_execve(filename, (char **) a1, (char **) a2, regs);
	if (error == 0)
		current->ptrace &= ~PT_DTRACE;
	putname(filename);
out:
	return error;
}

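/*
 * Walk the chain of stack frames starting at sp, printing the saved
 * LR slot (sp[1] on 32-bit PowerPC) of each frame, up to 32 entries.
 */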
void
print_backtrace(unsigned long *sp)
{
	int cnt = 0;
	unsigned long i;

	printk("Call backtrace: ");
	while (sp) {
		if (__get_user(i, &sp[1]))
			break;
		if (cnt++ % 7 == 0)
			printk("\n");
		printk("%08lX ", i);
		if (cnt > 32)
			break;
		if (__get_user(sp, (unsigned long **)sp))
			break;
	}
	printk("\n");
}

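/*
 * Print a backtrace for a (non-running) task starting from its saved
 * kernel stack pointer, following the frame back-chain and stopping
 * at the top of the task's kernel stack.
 */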
void show_trace_task(struct task_struct *tsk)
{
	unsigned long stack_top = (unsigned long) tsk + THREAD_SIZE;
	unsigned long sp, prev_sp;
	int count = 0;

	if (tsk == NULL)
		return;
	sp = (unsigned long) &tsk->thread.ksp;
	do {
		prev_sp = sp;
		sp = *(unsigned long *)sp;
		if (sp <= prev_sp || sp >= stack_top || (sp & 3) != 0)
			break;
		if (count > 0)
			printk("[%08lx] ", *(unsigned long *)(sp + 4));
	} while (++count < 16);
	if (count > 1)
		printk("\n");
}


/*
 * These bracket the sleeping functions..
 */
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched    ((unsigned long) scheduling_functions_start_here)
#define last_sched     ((unsigned long) scheduling_functions_end_here)

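/*
 * Return the address where a sleeping task is blocked, skipping frames
 * that lie inside the scheduler itself (typically surfaced via
 * /proc/<pid>/wchan).  Walks at most 16 frames and gives up if the
 * back chain leaves the task's 8kB kernel stack.
 */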
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	unsigned long stack_page = (unsigned long) p;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	sp = p->thread.ksp;
	do {
		sp = *(unsigned long *)sp;
		if (sp < stack_page || sp >= stack_page + 8188)
			return 0;
		if (count > 0) {
			ip = *(unsigned long *)(sp + 4);
			if (ip < first_sched || ip >= last_sched)
				return ip;
		}
	} while (count++ < 16);
	return 0;
}