/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>

#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
ATTRIB_NORET void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
			extern void smtc_idle_loop_hook(void);

			smtc_idle_loop_hook();
#endif
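			/* Enter the CPU-specific low-power wait routine, if one is set. */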
			if (cpu_wait)
				(*cpu_wait)();
		}
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

asmlinkage void ret_from_fork(void);

void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|KU_MASK);
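	/*
	 * On 64-bit kernels pick the FPU register model: ST0_FR is
	 * cleared for tasks using the 32-bit register set and set
	 * otherwise.
	 */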
#ifdef CONFIG_64BIT
	status &= ~ST0_FR;
	status |= (current->thread.mflags & MF_32BIT_REGS) ? 0 : ST0_FR;
#endif
	status |= KU_USER;
	regs->cp0_status = status;
	clear_used_math();
	clear_fpu_owner();
	if (cpu_has_dsp)
		__init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
	current_thread_info()->addr_limit = USER_DS;
}

void exit_thread(void)
{
}

void flush_thread(void)
{
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
	unsigned long unused, struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs;
	long childksp;
	p->set_child_tid = p->clear_child_tid = NULL;

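	/* Top of the child's kernel stack, below the reserved 32-byte save area. */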
	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	preempt_disable();

	if (is_fpu_owner())
		save_fp(p);

	if (cpu_has_dsp)
		save_dsp(p);

	preempt_enable();

	/* Set up the child's register frame at the top of its kernel stack. */
	childregs = (struct pt_regs *) childksp - 1;
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */

#if defined(CONFIG_BINFMT_IRIX)
	if (current->personality != PER_LINUX) {
		/* Under IRIX things are a little different. */
		childregs->regs[3] = 1;
		regs->regs[3] = 0;
	}
#endif
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	regs->regs[2] = p->pid;

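	/*
	 * A set ST0_CU0 bit means the child is a kernel thread: keep it
	 * on its kernel stack with a kernel address limit.
	 */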
	if (childregs->cp0_status & ST0_CU0) {
		childregs->regs[28] = (unsigned long) ti;
		childregs->regs[29] = childksp;
		ti->addr_limit = KERNEL_DS;
	} else {
		childregs->regs[29] = usp;
		ti->addr_limit = USER_DS;
	}
	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
	clear_tsk_thread_flag(p, TIF_USEDFPU);

#ifdef CONFIG_MIPS_MT_FPAFF
	/*
	 * FPU affinity support is cleaner if we track the
	 * user-visible CPU affinity from the very beginning.
	 * The generic cpus_allowed mask will already have
	 * been copied from the parent before copy_thread
	 * is invoked.
	 */
	p->thread.user_cpus_allowed = p->cpus_allowed;
#endif /* CONFIG_MIPS_MT_FPAFF */

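	/* For CLONE_SETTLS the new TLS pointer is passed in $a3 (regs[7]). */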
	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}

/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
	memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));

	return 1;
}

void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
{
	int i;

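	/* $26/$27 (k0/k1) are kernel scratch registers and are dumped as zero. */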
	for (i = 0; i < EF_R0; i++)
		gp[i] = 0;
	gp[EF_R0] = 0;
	for (i = 1; i <= 31; i++)
		gp[EF_R0 + i] = regs->regs[i];
	gp[EF_R26] = 0;
	gp[EF_R27] = 0;
	gp[EF_LO] = regs->lo;
	gp[EF_HI] = regs->hi;
	gp[EF_CP0_EPC] = regs->cp0_epc;
	gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	gp[EF_CP0_STATUS] = regs->cp0_status;
	gp[EF_CP0_CAUSE] = regs->cp0_cause;
#ifdef EF_UNUSED0
	gp[EF_UNUSED0] = 0;
#endif
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	elf_dump_regs(*regs, task_pt_regs(tsk));
	return 1;
}

int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
	memcpy(fpr, &t->thread.fpu, sizeof(t->thread.fpu));

	return 1;
}

/*
 * Create a kernel thread
 */
static ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
{
	do_exit(fn(arg));
}

long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

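	/* $a0 = arg and $a1 = fn: the arguments kernel_thread_helper() expects. */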
	regs.regs[4] = (unsigned long) arg;
	regs.regs[5] = (unsigned long) fn;
	regs.cp0_epc = (unsigned long) kernel_thread_helper;
	regs.cp0_status = read_c0_status();
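	/*
	 * Fix up the status image so the exception return taken via
	 * ret_from_fork leaves the new thread executing in kernel mode.
	 */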
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	regs.cp0_status &= ~(ST0_KUP | ST0_IEC);
	regs.cp0_status |= ST0_IEP;
#else
	regs.cp0_status |= ST0_EXL;
#endif

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}

/*
 * Per-function frame layout information used for stack unwinding.
 */
struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};

static inline int is_ra_save_ins(union mips_instruction *ip)
{
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 &&
		ip->i_format.rt == 31;
}

static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
{
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
}

static inline int is_sp_move_ins(union mips_instruction *ip)
{
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;
	return 0;
}

static int get_frame_info(struct mips_frame_info *info)
{
	union mips_instruction *ip = info->func;
	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
	unsigned i;

	info->pc_offset = -1;
	info->frame_size = 0;

	if (!ip)
		goto err;

	if (max_insns == 0)
		max_insns = 128U;	/* unknown function size */
	max_insns = min(128U, max_insns);

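	/*
	 * Scan the prologue: remember the stack adjustment made by the
	 * first sp-decrementing instruction and the frame slot in which
	 * $ra is saved; stop at the first call or jump instruction.
	 */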
	for (i = 0; i < max_insns; i++, ip++) {
		if (is_jal_jalr_jr_ins(ip))
			break;
		if (!info->frame_size) {
			if (is_sp_move_ins(ip))
				info->frame_size = -ip->i_format.simmediate;
			continue;
		}
		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
			info->pc_offset =
				ip->i_format.simmediate / sizeof(long);
			break;
		}
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

static struct mips_frame_info schedule_mfi __read_mostly;

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;

	kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
#endif
	schedule_mfi.func = schedule;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case. */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}

#ifdef CONFIG_KALLSYMS
/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page;
	struct mips_frame_info info;
	unsigned long size, ofs;
	int leaf;
	extern void ret_from_irq(void);
	extern void ret_from_exception(void);

	stack_page = (unsigned long)task_stack_page(task);
	if (!stack_page)
		return 0;

	/*
	 * If we reached the bottom of interrupt context,
	 * return saved pc in pt_regs.
	 */
	if (pc == (unsigned long)ret_from_irq ||
	    pc == (unsigned long)ret_from_exception) {
		struct pt_regs *regs;
		if (*sp >= stack_page &&
		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
			regs = (struct pt_regs *)*sp;
			pc = regs->cp0_epc;
			if (__kernel_text_address(pc)) {
				*sp = regs->regs[29];
				*ra = regs->regs[31];
				return pc;
			}
		}
		return 0;
	}
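	/* Look up the start address and size of the function containing pc. */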
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases, get_frame_info() can wrongly
		 * treat a nested function as a leaf one. In those
		 * cases, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
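	/* Pop schedule()'s frame to get at its caller's stack pointer. */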
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}