1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#include "linux/sched.h"
7#include "linux/slab.h"
8#include "linux/ptrace.h"
9#include "linux/proc_fs.h"
10#include "linux/file.h"
11#include "linux/errno.h"
12#include "linux/init.h"
13#include "asm/uaccess.h"
14#include "asm/atomic.h"
15#include "kern_util.h"
16#include "as-layout.h"
17#include "skas.h"
18#include "os.h"
19#include "tlb.h"
20#include "kern.h"
21#include "mode.h"
22#include "registers.h"
23
24void switch_to_skas(void *prev, void *next)
25{
26	struct task_struct *from, *to;
27
28	from = prev;
29	to = next;
30
31	if(current->pid == 0)
32		switch_timers(0);
33
34	switch_threads(&from->thread.mode.skas.switch_buf,
35		       &to->thread.mode.skas.switch_buf);
36
37	arch_switch_to_skas(current->thread.prev_sched, current);
38
39	if(current->pid == 0)
40		switch_timers(1);
41}
42
43extern void schedule_tail(struct task_struct *prev);
44
45/* This is called magically, by its address being stuffed in a jmp_buf
46 * and being longjmp-d to.
47 */
48void new_thread_handler(void)
49{
50	int (*fn)(void *), n;
51	void *arg;
52
53	if(current->thread.prev_sched != NULL)
54		schedule_tail(current->thread.prev_sched);
55	current->thread.prev_sched = NULL;
56
57	fn = current->thread.request.u.thread.proc;
58	arg = current->thread.request.u.thread.arg;
59
60	/* The return value is 1 if the kernel thread execs a process,
61	 * 0 if it just exits
62	 */
63	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
64	if(n == 1){
65		/* Handle any immediate reschedules or signals */
66		interrupt_end();
67		userspace(&current->thread.regs.regs);
68	}
69	else do_exit(0);
70}
71
/* Nothing per-thread to release in SKAS mode. */
void release_thread_skas(struct task_struct *task)
{
}
75
76/* Called magically, see new_thread_handler above */
77void fork_handler(void)
78{
79	force_flush_all();
80	if(current->thread.prev_sched == NULL)
81		panic("blech");
82
83	schedule_tail(current->thread.prev_sched);
84
85	arch_switch_to_skas(current->thread.prev_sched, current);
86
87	current->thread.prev_sched = NULL;
88
89/* Handle any immediate reschedules or signals */
90	interrupt_end();
91
92	userspace(&current->thread.regs.regs);
93}
94
95int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
96		     unsigned long stack_top, struct task_struct * p,
97		     struct pt_regs *regs)
98{
99	void (*handler)(void);
100
101	if(current->thread.forking){
102	  	memcpy(&p->thread.regs.regs.skas, &regs->regs.skas,
103		       sizeof(p->thread.regs.regs.skas));
104		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.skas.regs, 0);
105		if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;
106
107		handler = fork_handler;
108
109		arch_copy_thread(&current->thread.arch, &p->thread.arch);
110	}
111	else {
112		init_thread_registers(&p->thread.regs.regs);
113		p->thread.request.u.thread = current->thread.request.u.thread;
114		handler = new_thread_handler;
115	}
116
117	new_thread(task_stack_page(p), &p->thread.mode.skas.switch_buf,
118		   handler);
119	return(0);
120}
121
122int new_mm(unsigned long stack)
123{
124	int fd;
125
126	fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
127	if(fd < 0)
128		return(fd);
129
130	if(skas_needs_stub)
131		map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);
132
133	return(fd);
134}
135
136void init_idle_skas(void)
137{
138	cpu_tasks[current_thread->cpu].pid = os_getpid();
139	default_idle();
140}
141
142extern void start_kernel(void);
143
144static int start_kernel_proc(void *unused)
145{
146	int pid;
147
148	block_signals();
149	pid = os_getpid();
150
151	cpu_tasks[0].pid = pid;
152	cpu_tasks[0].task = current;
153#ifdef CONFIG_SMP
154	cpu_online_map = cpumask_of_cpu(0);
155#endif
156	start_kernel();
157	return(0);
158}
159
160extern int userspace_pid[];
161
162extern char cpu0_irqstack[];
163
164int start_uml_skas(void)
165{
166	stack_protections((unsigned long) &cpu0_irqstack);
167	set_sigstack(cpu0_irqstack, THREAD_SIZE);
168	if(proc_mm)
169		userspace_pid[0] = start_userspace(0);
170
171	init_new_thread_signals();
172
173	init_task.thread.request.u.thread.proc = start_kernel_proc;
174	init_task.thread.request.u.thread.arg = NULL;
175	return(start_idle_thread(task_stack_page(&init_task),
176				 &init_task.thread.mode.skas.switch_buf));
177}
178
179int external_pid_skas(struct task_struct *task)
180{
181	return(userspace_pid[0]);
182}
183
184int thread_pid_skas(struct task_struct *task)
185{
186	return(userspace_pid[0]);
187}
188
189void kill_off_processes_skas(void)
190{
191	if(proc_mm)
192		os_kill_ptraced_process(userspace_pid[0], 1);
193	else {
194		struct task_struct *p;
195		int pid, me;
196
197		me = os_getpid();
198		for_each_process(p){
199			if(p->mm == NULL)
200				continue;
201
202			pid = p->mm->context.skas.id.u.pid;
203			os_kill_ptraced_process(pid, 1);
204		}
205	}
206}
207
208unsigned long current_stub_stack(void)
209{
210	if(current->mm == NULL)
211		return(0);
212
213	return(current->mm->context.skas.id.stack);
214}
215