1/* process.c: FRV specific parts of process handling
2 *
3 * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from arch/m68k/kernel/process.c
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/module.h>
14#include <linux/errno.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/mm.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/stddef.h>
21#include <linux/unistd.h>
22#include <linux/ptrace.h>
23#include <linux/slab.h>
24#include <linux/user.h>
25#include <linux/elf.h>
26#include <linux/reboot.h>
27#include <linux/interrupt.h>
28#include <linux/pagemap.h>
29
30#include <asm/asm-offsets.h>
31#include <asm/uaccess.h>
32#include <asm/system.h>
33#include <asm/setup.h>
34#include <asm/pgtable.h>
35#include <asm/tlb.h>
36#include <asm/gdb-stub.h>
37#include <asm/mb-regs.h>
38
39#include "local.h"
40
41asmlinkage void ret_from_fork(void);
42
43#include <asm/pgalloc.h>
44
/* hook through which a power-management driver may supply the operation
 * that actually cuts the power; exported so modules can install it
 * (NOTE(review): nothing in this file currently invokes it — confirm
 * whether machine_power_off() was meant to call it) */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
47
48struct task_struct *alloc_task_struct(void)
49{
50	struct task_struct *p = kmalloc(THREAD_SIZE, GFP_KERNEL);
51	if (p)
52		atomic_set((atomic_t *)(p+1), 1);
53	return p;
54}
55
56void free_task_struct(struct task_struct *p)
57{
58	if (atomic_dec_and_test((atomic_t *)(p+1)))
59		kfree(p);
60}
61
/*
 * Default idle callback: put the CPU core to sleep until the next event,
 * optionally flashing the debug LEDs around the sleep period.
 */
static void core_sleep_idle(void)
{
#ifdef LED_DEBUG_SLEEP
	/* Show that we're sleeping... */
	__set_LEDS(0x55aa);
#endif
	frv_cpu_core_sleep();
#ifdef LED_DEBUG_SLEEP
	/* ... and that we woke up */
	__set_LEDS(0);
#endif
	mb();	/* memory barrier: order accesses around the sleep */
}
75
/* the idle routine invoked by cpu_idle(); defaults to sleeping the core */
void (*idle)(void) = core_sleep_idle;
77
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			/* note when this CPU last went idle (presumably
			 * consumed by IRQ accounting — TODO confirm) */
			irq_stat[cpu].idle_timestamp = jiffies;

			/* trim surplus cached page tables */
			check_pgt_cache();

			/* don't sleep the core while a DMA transfer is in
			 * progress; otherwise defer to the idle callback */
			if (!frv_dma_inprogress && idle)
				idle();
		}

		/* work is runnable: let it run, then resume idling with
		 * preemption disabled again */
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
104
/*
 * Restart the machine by writing to the CPU's software-reset location.
 * Never returns.
 */
void machine_restart(char * __unused)
{
	unsigned long reset_addr;
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(0);	/* detach cleanly from any attached debugger */
#endif

	/* the reset location differs on the FR551 from the other
	 * implementations */
	if (PSR_IMPLE(__get_PSR()) == PSR_IMPLE_FR551)
		reset_addr = 0xfefff500;
	else
		reset_addr = 0xfeff0500;

	/* Software reset. */
	/* flush the data cache, fence, then store 1 to the reset location;
	 * the trailing NOPs pad execution while the reset takes effect
	 * (NOTE(review): padding length presumably matches pipeline depth
	 * — confirm against CPU manual) */
	asm volatile("      dcef @(gr0,gr0),1 ! membar !"
		     "      sti     %1,@(%0,0) !"
		     "      nop ! nop ! nop ! nop ! nop ! "
		     "      nop ! nop ! nop ! nop ! nop ! "
		     "      nop ! nop ! nop ! nop ! nop ! "
		     "      nop ! nop ! nop ! nop ! nop ! "
		     : : "r" (reset_addr), "r" (1) );

	/* spin in case the reset does not take effect immediately */
	for (;;)
		;
}
129
/*
 * Halt the machine: notify any attached GDB stub that we're going away,
 * then spin forever.  Never returns.
 */
void machine_halt(void)
{
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(0);
#endif

	while (1)
		;
}
138
139void machine_power_off(void)
140{
141#ifdef CONFIG_GDBSTUB
142	gdbstub_exit(0);
143#endif
144
145	for (;;);
146}
147
/*
 * Prepare the current thread to execute a new program: restore the
 * default userspace address limit.
 */
void flush_thread(void)
{
	set_fs(USER_DS);
}
152
153inline unsigned long user_stack(const struct pt_regs *regs)
154{
155	while (regs->next_frame)
156		regs = regs->next_frame;
157	return user_mode(regs) ? regs->sp : 0;
158}
159
/*
 * fork() system call
 * - rejected outright on no-MMU configurations, where a fully copied
 *   address space cannot be provided
 */
asmlinkage int sys_fork(void)
{
#ifndef CONFIG_MMU
	/* fork almost works, enough to trick you into looking elsewhere:-( */
	return -EINVAL;
#else
	return do_fork(SIGCHLD, user_stack(__frame), __frame, 0, NULL, NULL);
#endif
}
169
170asmlinkage int sys_vfork(void)
171{
172	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, user_stack(__frame), __frame, 0,
173		       NULL, NULL);
174}
175
/*****************************************************************************/
/*
 * clone a process
 * - tlsptr is retrieved by copy_thread()
 * - if no new stack pointer is supplied, the child continues on the
 *   parent's current userspace stack
 */
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 int __user *parent_tidptr, int __user *child_tidptr,
			 int __user *tlsptr)
{
	if (!newsp)
		newsp = user_stack(__frame);
	return do_fork(clone_flags, newsp, __frame, 0, parent_tidptr, child_tidptr);
} /* end sys_clone() */
189
/*****************************************************************************/
/*
 * Arch hook called before the current task is duplicated into a new
 * thread; nothing is required on FRV (the user register state is
 * captured later, in copy_thread()).
 */
void prepare_to_copy(struct task_struct *tsk)
{
	/* intentionally empty */
} /* end prepare_to_copy() */
199
/*****************************************************************************/
/*
 * set up the kernel stack and exception frames for a new process
 * - the child gets a copy of the CPU's frame0 as its userspace frame, at
 *   the very top of its kernel stack; a second kernel frame is stacked
 *   beneath it when the caller is kernel_thread()
 * - always returns 0
 */
int copy_thread(int nr, unsigned long clone_flags,
		unsigned long usp, unsigned long topstk,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs0, *childregs, *regs0;

	regs0 = __kernel_frame0_ptr;
	/* the userspace exception frame occupies the top of the child's
	 * kernel stack */
	childregs0 = (struct pt_regs *)
		(task_stack_page(p) + THREAD_SIZE - FRV_FRAME0_SIZE);
	childregs = childregs0;

	/* set up the userspace frame (the only place that the USP is stored) */
	*childregs0 = *regs0;

	childregs0->gr8		= 0;	/* presumably the syscall return
					 * register: child sees 0 — confirm */
	childregs0->sp		= usp;
	childregs0->next_frame	= NULL;

	/* set up the return kernel frame if called from kernel_thread() */
	if (regs != regs0) {
		childregs--;
		*childregs = *regs;
		childregs->sp = (unsigned long) childregs0;
		childregs->next_frame = childregs0;
		childregs->gr15 = (unsigned long) task_thread_info(p);
		childregs->gr29 = (unsigned long) p;
	}

	p->set_child_tid = p->clear_child_tid = NULL;

	/* the child will resume in ret_from_fork() on this frame */
	p->thread.frame	 = childregs;
	p->thread.curr	 = p;
	p->thread.sp	 = (unsigned long) childregs;
	p->thread.fp	 = 0;
	p->thread.lr	 = 0;
	p->thread.pc	 = (unsigned long) ret_from_fork;
	p->thread.frame0 = childregs0;

	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
	if (clone_flags & CLONE_SETTLS)
		childregs->gr29 = childregs->gr12;

	/* snapshot the parent's user register state for the child */
	save_user_regs(p->thread.user);

	return 0;
} /* end copy_thread() */
250
251/*
252 * sys_execve() executes a new program.
253 */
254asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
255{
256	int error;
257	char * filename;
258
259	lock_kernel();
260	filename = getname(name);
261	error = PTR_ERR(filename);
262	if (IS_ERR(filename))
263		goto out;
264	error = do_execve(filename, argv, envp, __frame);
265	putname(filename);
266 out:
267	unlock_kernel();
268	return error;
269}
270
/*
 * Determine where a sleeping task is waiting: walk its saved
 * frame-pointer chain until a return address outside the scheduler is
 * found, and report that.  Returns 0 for current/running tasks or if
 * the chain looks invalid.
 */
unsigned long get_wchan(struct task_struct *p)
{
	struct pt_regs *regs0;
	unsigned long fp, pc;
	unsigned long stack_limit;
	int count = 0;		/* bounds the walk in case the chain loops */
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/* reject frame pointers below the end of the task_struct or at or
	 * above the frame0 exception frame */
	stack_limit = (unsigned long) (p + 1);
	fp = p->thread.fp;
	regs0 = p->thread.frame0;

	do {
		/* sanity-check the frame pointer: in range and word-aligned */
		if (fp < stack_limit || fp >= (unsigned long) regs0 || fp & 3)
			return 0;

		/* the saved return address is the third word of the frame */
		pc = ((unsigned long *) fp)[2];

		if (!in_sched_functions(pc))
			return pc;

		/* follow the chain out to the caller's frame */
		fp = *(unsigned long *) fp;
	} while (count++ < 16);

	return 0;
}
298
299unsigned long thread_saved_pc(struct task_struct *tsk)
300{
301	/* Check whether the thread is blocked in resume() */
302	if (in_sched_functions(tsk->thread.pc))
303		return ((unsigned long *)tsk->thread.fp)[2];
304	else
305		return tsk->thread.pc;
306}
307
/*
 * Decide whether an ELF executable can run on this CPU, based on the
 * GPR/FPR bank requirements, the multiply-add requirement and the CPU
 * family recorded in the header's e_flags.
 * Returns 1 if compatible, 0 otherwise.
 */
int elf_check_arch(const struct elf32_hdr *hdr)
{
	unsigned long hsr0 = __get_HSR(0);
	unsigned long psr = __get_PSR();

	if (hdr->e_machine != EM_FRV)
		return 0;

	/* a binary requiring 64 GPRs can't run with only 32 enabled */
	switch (hdr->e_flags & EF_FRV_GPR_MASK) {
	case EF_FRV_GPR64:
		if ((hsr0 & HSR0_GRN) == HSR0_GRN_32)
			return 0;
		/* fall through */
	case EF_FRV_GPR32:
	case 0:
		break;
	default:
		return 0;
	}

	/* likewise for the FP/media register bank */
	switch (hdr->e_flags & EF_FRV_FPR_MASK) {
	case EF_FRV_FPR64:
		if ((hsr0 & HSR0_FRN) == HSR0_FRN_32)
			return 0;
		/* fall through */
	case EF_FRV_FPR32:
	case EF_FRV_FPR_NONE:
	case 0:
		break;
	default:
		return 0;
	}

	/* binaries using multiply-add need an FR405 or FR451 */
	if ((hdr->e_flags & EF_FRV_MULADD) == EF_FRV_MULADD)
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR405 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR451)
			return 0;

	/* match the CPU family the binary was built for against the
	 * implementation we're actually running on */
	switch (hdr->e_flags & EF_FRV_CPU_MASK) {
	case EF_FRV_CPU_GENERIC:
		break;
	case EF_FRV_CPU_FR300:
	case EF_FRV_CPU_SIMPLE:
	case EF_FRV_CPU_TOMCAT:
	default:
		return 0;
	case EF_FRV_CPU_FR400:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR401 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR405 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR451 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR551)
			return 0;
		break;
	case EF_FRV_CPU_FR450:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR451)
			return 0;
		break;
	case EF_FRV_CPU_FR500:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR501)
			return 0;
		break;
	case EF_FRV_CPU_FR550:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR551)
			return 0;
		break;
	}

	return 1;
}
375
376int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
377{
378	memcpy(fpregs,
379	       &current->thread.user->f,
380	       sizeof(current->thread.user->f));
381	return 1;
382}
383