/* /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/arch/um/kernel/ */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	return userspace_pid[0];
}

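/*
 * Map a host pid back to the virtual CPU whose cpu_tasks[] slot it
 * occupies, or -1 if no CPU is currently backed by that pid.
 */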
int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

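/*
 * Allocate 2^order pages for a kernel stack.  GFP_ATOMIC is used when the
 * caller cannot sleep.
 */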
unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

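/*
 * Start a kernel thread: stash the function and its argument in the current
 * thread's request area and fork; the child picks them up in
 * new_thread_handler() below.
 */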
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
		      &current->thread.regs, 0, NULL, NULL);
	return pid;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

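/*
 * The core of the context switch: record the incoming task in cpu_tasks[],
 * then jump between the two threads' switch buffers.  The do/while loop
 * re-runs the switch whenever the thread we resumed into had left a
 * saved_task behind to return to.
 */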
void *_switch_to(void *prev, void *next, void *last)
{
	struct task_struct *from = prev;
	struct task_struct *to = next;

	to->thread.prev_sched = from;
	set_current(to);

	do {
		current->thread.saved_task = NULL;

		switch_threads(&from->thread.switch_buf,
			       &to->thread.switch_buf);

		arch_switch_to(current);

		if (current->thread.saved_task)
			show_regs(&(current->thread.regs));
		to = current->thread.saved_task;
		from = current;
	} while (current->thread.saved_task);

	return current->thread.prev_sched;
}

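/*
 * Common tail for returning to userspace: give the scheduler a chance to
 * run and deliver any pending signals before dropping out of the kernel.
 */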
void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_tsk_thread_flag(current, TIF_SIGPENDING))
		do_signal();
}

void exit_thread(void)
{
}

void *get_current(void)
{
	return current;
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if (n == 1) {
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	}
	else do_exit(0);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}

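/*
 * Set up the child's thread state: for fork(), copy and fix up the parent's
 * registers and resume in fork_handler(); for a kernel thread, start from a
 * safe register set and resume in new_thread_handler().
 */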
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct * p,
		struct pt_regs *regs)
{
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (current->thread.forking) {
		memcpy(&p->thread.regs.regs, &regs->regs,
		       sizeof(p->thread.regs.regs));
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.gp, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	}
	else {
		get_safe_registers(p->thread.regs.regs.gp);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (current->thread.forking) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

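/*
 * Idle loop: when there is nothing to run, stop the periodic tick and sleep
 * in the host for the length of the next timer interval.
 */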
void default_idle(void)
{
	unsigned long long nsecs;

	while (1) {
		/* endless idle loop with no priority at all */

		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if (need_resched())
			schedule();

		tick_nohz_stop_sched_tick(1);
		nsecs = disable_timer();
		idle_sleep(nsecs);
		tick_nohz_restart_sched_tick();
	}
}

void cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	default_idle();
}

int __cant_sleep(void) {
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

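/*
 * Nonzero when sp does not lie on the current kernel stack, i.e. it is a
 * userspace stack pointer.
 */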
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}

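/*
 * Thin uaccess wrappers; presumably exported so that UML's host-side (os_*)
 * code can use them without pulling in the kernel's uaccess headers.
 */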
int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread_info()->cpu;
	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

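/*
 * /proc/sysemu: a single-digit knob selecting how much of the host's
 * PTRACE_SYSEMU support is used for syscall interception.  Writes of
 * '0'-'2' are accepted; values above what the host supports are ignored.
 */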
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;
	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL)
	{
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

int singlestepping(void * t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

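/*
 * Find the "waiting channel" of a sleeping task: walk its kernel stack up
 * from the saved stack pointer and return the first return address that is
 * above the scheduler functions.
 */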
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = 0;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = 1;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

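/*
 * Dump a task's FP state for a core file by reading the registers straight
 * from the host process backing this CPU's userspace.
 */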
int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}