/*
 * arch/ubicom32/kernel/process.c
 *   Ubicom32 architecture-dependent process handling.
 *
 * (C) Copyright 2009, Ubicom, Inc.
 * Copyright (C) 1995  Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 * uClinux changes
 * Copyright (C) 2000-2002, David McCullough <davidm@snapgear.com>
 *
 * This file is part of the Ubicom32 Linux Kernel Port.
 *
 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
 * it and/or modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the Ubicom32 Linux Kernel Port.  If not,
 * see <http://www.gnu.org/licenses/>.
 *
 * Ubicom32 implementation derived from (with many thanks):
 *   arch/m68knommu
 *   arch/blackfin
 *   arch/parisc
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/fs.h>
#include <linux/pm.h>

#include <linux/uaccess.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/ip5000.h>
#include <asm/range-protect.h>

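/*
 * DUMP_RANGE_REGISTER()
 *	Read and print the enable, low and high words of one instruction ("I")
 *	or data ("D") range-protection register set.  REG and IDX are string
 *	literals pasted into the control register names (e.g. "I", "0" reads
 *	I_RANGE0_EN/LO/HI); the macro expects 'en', 'lo' and 'hi' to be
 *	variables in the calling scope.
 */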
#define DUMP_RANGE_REGISTER(REG, IDX) asm volatile ( \
        "       move.4          %0, "REG"_RANGE"IDX"_EN \n\t" \
        "       move.4          %1, "REG"_RANGE"IDX"_LO \n\t" \
        "       move.4          %2, "REG"_RANGE"IDX"_HI \n\t" \
                : "=d"(en), "=d"(lo), "=d"(hi) \
        ); \
        printk(KERN_NOTICE REG"Range"IDX": en:%08x, range: %08x-%08x\n", \
                (unsigned int)en, \
                (unsigned int)lo, \
                (unsigned int)hi)

asmlinkage void ret_from_fork(void);

void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);

/* machine-dependent / hardware-specific power functions */
void (*mach_reset)(void);
void (*mach_halt)(void);
void (*mach_power_off)(void);

/*
 * cpu_idle()
 *	The idle thread.
 *
 * Our idle loop suspends and is woken up by a timer interrupt.
 */
void cpu_idle(void)
{
	while (1) {
		local_irq_disable();
		while (!need_resched()) {
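			/*
			 * Interrupts must be enabled across thread_suspend()
			 * so that the timer interrupt can wake us up (see the
			 * comment above).
			 */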
			local_irq_enable();
			thread_suspend();
			local_irq_disable();
		}
		local_irq_enable();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * dump_fpu()
 *
 *	Fill in the fpu structure for a core dump. (just a stub as we don't have
 *	an fpu)
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
	return 1;
}

/*
 * machine_restart()
 *	Resets the system.
 */
void machine_restart(char *__unused)
{
	/*
	 * Disable all threads except myself. We can do this
	 * directly without needing to call smp_send_stop
	 * because we have a unique architecture where
	 * one thread can disable one or more other threads.
	 */
	thread_disable_others();

	/*
	 * Call the hardware-specific machine reset function.
	 */
	if (mach_reset) {
		mach_reset();
	}

	printk(KERN_EMERG "System Restarting\n");

	/*
	 * Set the watchdog to trigger after a 1 ms delay (the OSC is fixed
	 * at 12 MHz)
	 */
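	/*
	 * Programming sequence: writing TIMER_TKEYVAL to tkey appears to
	 * unlock the watchdog registers and the final write of 0 re-locks
	 * them; wdcom is set to the current timer value plus 12000 cycles,
	 * i.e. 1 ms at the fixed 12 MHz oscillator.
	 */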
	UBICOM32_IO_TIMER->tkey = TIMER_TKEYVAL;
	UBICOM32_IO_TIMER->wdcom = UBICOM32_IO_TIMER->mptval +
		(12000000 / 1000);
	UBICOM32_IO_TIMER->wdcfg = 0;
	UBICOM32_IO_TIMER->tkey = 0;

	/*
	 * Wait for watchdog
	 */
	asm volatile (
		"	move.4		MT_EN, #0		\n\t"
		"	pipe_flush	0			\n\t"
	);

	local_irq_disable();
	for (;;) {
		thread_suspend();
	}
}

/*
 * machine_halt()
 *	Halt the machine.
 *
 * Similar to machine_power_off, but doesn't shut off power.  Add code
 * here to freeze the system, e.g. for post-mortem debugging, when
 * possible.  This halt has nothing to do with the idle halt.
 */
void machine_halt(void)
{
	/*
	 * Disable all threads except myself. We can do this
	 * directly without needing to call smp_send_stop
	 * because we have a unique architecture where
	 * one thread can disable one or more other threads.
	 */
	thread_disable_others();

	/*
	 * Call the hardware-specific machine halt function.
	 */
	if (mach_halt) {
		mach_halt();
	}

	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	for (;;) {
		thread_suspend();
	}
}

/*
 * machine_power_off()
 *	Turn the power off if a power-off handler is defined; otherwise, spin
 *	endlessly.
 */
void machine_power_off(void)
{
	/*
	 * Disable all threads except myself. We can do this
	 * directly without needing to call smp_send_stop
	 * because we have a unique architecture where
	 * one thread can disable one or more other threads.
	 */
	thread_disable_others();

	/*
	 * Call the hardware-specific machine power off function.
	 */
	if (mach_power_off) {
		mach_power_off();
	}

	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	for (;;) {
		thread_suspend();
	}
}

/*
 * address_is_valid()
 *	Check whether an address is valid (for read access).
 */
static bool address_is_valid(const void *address)
{
	int addr = (int)address;
	unsigned long socm, eocm, sdram, edram;

	if (addr & 3)
		return false;

	processor_ocm(&socm, &eocm);
	processor_dram(&sdram, &edram);
	if (addr >= socm && addr < eocm)
		return true;

	if (addr >= sdram && addr < edram)
		return true;

	return false;
}

/*
 * vma_path_name_is_valid()
 *	Check whether the path name of a vma is a valid string.
 */
static bool vma_path_name_is_valid(const char *str)
{
#define MAX_NAME_LEN 256
	int i = 0;

	if (!address_is_valid(str))
		return false;

	for (; i < MAX_NAME_LEN; i++, str++) {
		if (*str == '\0')
			return true;
	}

	return false;
}

/*
 * show_vmas()
 * 	show vma info of a process
 */
void show_vmas(struct task_struct *task)
{
#ifdef CONFIG_DEBUG_VERBOSE
#define UBICOM32_MAX_VMA_COUNT 1024

	struct vm_area_struct *vma;
	struct file *file;
	char *name = "";
	int flags, loop = 0;

	printk(KERN_NOTICE "Start of vma list\n");

	if (!address_is_valid(task) || !address_is_valid(task->mm))
		goto error;

	vma = task->mm->mmap;
	while (vma) {
		if (!address_is_valid(vma))
			goto error;

		flags = vma->vm_flags;
		file = vma->vm_file;

		if (file) {
			/*
			 * seems better to use dentry op here, but sanity
			 * check is easier this way
			 */
			if (!address_is_valid(file) ||
			    !address_is_valid(file->f_path.dentry) ||
			    !vma_path_name_is_valid(file->f_path.dentry->d_name.name))
				goto error;

			name = (char *)file->f_path.dentry->d_name.name;
		}

		/* Similar to /proc/pid/maps format */
		printk(KERN_NOTICE "%08lx-%08lx %c%c%c%c %08lx %s\n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			name);

		vma = vma->vm_next;

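		/*
		 * A corrupted list could loop forever; bail out after a sane
		 * number of entries.
		 */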
		if (loop++ > UBICOM32_MAX_VMA_COUNT)
			goto error;
	}

	printk(KERN_NOTICE "End of vma list\n");
	return;

error:
	printk(KERN_NOTICE "\nCorrupted vma list, abort!\n");
#endif
}

/*
 * show_regs()
 *	Print out all of the registers.
 */
void show_regs(struct pt_regs *regs)
{
	unsigned int i;
	unsigned int en, lo, hi;

	printk(KERN_NOTICE "regs: %p, tid: %d\n",
		(void *)regs,
		thread_get_self());

	printk(KERN_NOTICE "pc: %08x, previous_pc: %08x\n\n",
		(unsigned int)regs->pc,
		(unsigned int)regs->previous_pc);

	printk(KERN_NOTICE "Data registers\n");
	for (i = 0; i < 16; i++) {
		printk("D%02d: %08x, ", i, (unsigned int)regs->dn[i]);
		if ((i % 4) == 3) {
			printk("\n");
		}
	}
	printk("\n");

	printk(KERN_NOTICE "Address registers\n");
	for (i = 0; i < 8; i++) {
		printk("A%02d: %08x, ", i, (unsigned int)regs->an[i]);
		if ((i % 4) == 3) {
			printk("\n");
		}
	}
	printk("\n");

	printk(KERN_NOTICE "acc0: %08x-%08x, acc1: %08x-%08x\n",
		(unsigned int)regs->acc0[1],
		(unsigned int)regs->acc0[0],
		(unsigned int)regs->acc1[1],
		(unsigned int)regs->acc1[0]);

	printk(KERN_NOTICE "mac_rc16: %08x, source3: %08x\n",
		(unsigned int)regs->mac_rc16,
		(unsigned int)regs->source3);

	printk(KERN_NOTICE "inst_cnt: %08x, csr: %08x\n",
		(unsigned int)regs->inst_cnt,
		(unsigned int)regs->csr);

	printk(KERN_NOTICE "int_mask0: %08x, int_mask1: %08x\n",
		(unsigned int)regs->int_mask0,
		(unsigned int)regs->int_mask1);

	/*
	 * Dump range registers
	 */
	DUMP_RANGE_REGISTER("I", "0");
	DUMP_RANGE_REGISTER("I", "1");
	DUMP_RANGE_REGISTER("I", "2");
	DUMP_RANGE_REGISTER("I", "3");
	DUMP_RANGE_REGISTER("D", "0");
	DUMP_RANGE_REGISTER("D", "1");
	DUMP_RANGE_REGISTER("D", "2");
	DUMP_RANGE_REGISTER("D", "3");
	DUMP_RANGE_REGISTER("D", "4");

	printk(KERN_NOTICE "frame_type: %d, nesting_level: %d, thread_type %d\n\n",
		(int)regs->frame_type,
		(int)regs->nesting_level,
		(int)regs->thread_type);
}

/*
 * kernel_thread_helper()
 *	On execution, d0 will be 0 and d1 will be the argument to be passed to
 *	the kernel function.  d2 contains the kernel function that needs to be
 *	called, and d3 contains the address of do_exit, which needs to be moved
 *	into a5.  On return from fork the child thread's d0 will be 0.  We call
 *	this dummy function, which in turn loads the argument and calls the
 *	kernel function.
 */
asmlinkage void kernel_thread_helper(void);

/*
 * kernel_thread()
 *	Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.dn[1] = (unsigned long)arg;
	regs.dn[2] = (unsigned long)fn;
	regs.dn[3] = (unsigned long)do_exit;
	regs.an[5] = (unsigned long)kernel_thread_helper;
	regs.pc = (unsigned long)kernel_thread_helper;
	regs.nesting_level = 0;
	regs.thread_type = KERNEL_THREAD;

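	/*
	 * CLONE_VM keeps the kernel address space shared with the parent and
	 * CLONE_UNTRACED prevents tracing from being forced on the new thread.
	 */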
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
		       0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
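
/*
 * Illustrative usage (a hypothetical example, not part of the original port):
 * the thread function's return value is handed to do_exit() by
 * kernel_thread_helper().
 *
 *	static int my_thread_fn(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 */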

/*
 * flush_thread()
 *	XXX todo
 */
void flush_thread(void)
{
	/* XXX todo */
}

/*
 * sys_fork()
 *	Not implemented on no-mmu.
 */
asmlinkage int sys_fork(struct pt_regs *regs)
{
	/* fork almost works, enough to trick you into looking elsewhere :-( */
	return -EINVAL;
}

/*
 * sys_vfork()
 *	By the time we get here, the non-volatile registers have also been saved
 *	on the stack.  We do some ugly pointer manipulation here (see also
 *	copy_thread, which does the context copy).
 */
asmlinkage int sys_vfork(struct pt_regs *regs)
{
	unsigned long old_sp = regs->an[7];
	unsigned long old_a5 = regs->an[5];
	unsigned long old_return_address;
	long do_fork_return;

	/*
	 * Read the old return address from the stack.
	 */
	if (copy_from_user(&old_return_address,
			   (void *)old_sp, sizeof(unsigned long))) {
		force_sig(SIGSEGV, current);
		return 0;
	}

	/*
	 * Pop the vfork call frame by setting a5 and pc to the old return
	 * address and incrementing the stack pointer by 4.
	 */
	regs->an[5] = old_return_address;
	regs->pc = old_return_address;
	regs->an[7] += 4;

	do_fork_return = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
				 regs->an[7], regs, 0, NULL, NULL);

	/*
	 * Now we have to test whether the return code is an error.  If it is
	 * an error, we restore the frame and error processing will happen in
	 * user space.  Otherwise the child and the parent will return to the
	 * correct places.
	 */
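	/*
	 * do_fork() returns the new pid on success and a negative errno on
	 * failure; values at the very top of the unsigned range (anything
	 * from (unsigned long)-125 upward) are treated as errors here.
	 */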
	if ((unsigned long)(do_fork_return) >= (unsigned long)(-125)) {
		/*
		 * Error case. We need to restore the frame.
		 */
		regs->an[5] = old_a5;
		regs->pc = old_a5;
		regs->an[7] = old_sp;
	}

	return do_fork_return;
}

/*
 * sys_clone()
 *	creates a child thread.
 */
asmlinkage int sys_clone(unsigned long clone_flags,
			 unsigned long newsp,
			 struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->an[7];
	return do_fork(clone_flags, newsp, regs, 0,
		       NULL, NULL);
}

/*
 * copy_thread()
 *	low level thread copy, only used by do_fork in kernel/fork.c
 */
int copy_thread(unsigned long clone_flags,
		unsigned long usp, unsigned long topstk,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;

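	/*
	 * Place the child's pt_regs frame just below the top of its kernel
	 * stack, leaving an 8-byte pad at the very top.
	 */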
	childregs = (struct pt_regs *)
		(task_stack_page(p) + THREAD_SIZE - 8) - 1;

	*childregs = *regs;

	/*
	 * Set return value for child to be 0.
	 */
	childregs->dn[0] = 0;

	if (usp)
		childregs->an[7] = usp;
	else
		childregs->an[7] = (unsigned long)task_stack_page(p) +
			THREAD_SIZE - 8;

	/*
	 * Set up the switch_to frame to return to "ret_from_fork"
	 */
	p->thread.a5 = (unsigned long)ret_from_fork;
	p->thread.sp = (unsigned long)childregs;

	return 0;
}


/*
 * sys_execve()
 *	executes a new program.
 */
asmlinkage int sys_execve(char *name, char **argv,
			  char **envp, struct pt_regs *regs)
{
	int error;
	char *filename;

	lock_kernel();
	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
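	/*
	 * The global sys_execve_complete label marks the point at which
	 * do_execve() has returned; it is presumably referenced elsewhere in
	 * the port (e.g. by backtrace or fault-handling code).
	 */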
	asm ("       .global sys_execve_complete\n"
	     "       sys_execve_complete:");
out:
	unlock_kernel();
	return error;
}

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return tsk->thread.a5;
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long pc;

	/*
	 * If we don't have a process, or it is not the current
	 * one or not RUNNING, it makes no sense to ask for a
	 * wchan.
	 */
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * TODO: If the process is in the middle of schedule, we
	 * are supposed to do something different but for now we
	 * will return the same thing in both situations.
	 */
	pc = thread_saved_pc(p);
	if (in_sched_functions(pc))
		return pc;
	return pc;
}

/*
 * Infrequently used interface to dump task registers to core files.
 */
int dump_task_regs(struct task_struct *task, elf_gregset_t *elfregs)
{
	struct pt_regs *regs = task_pt_regs(task);

	*(struct pt_regs *)elfregs = *regs;

	return 1;
}

/*
 * __switch_to()
 *	__switch_to is the function that implements the context save and
 *	switch within the kernel.  Since this is a function call, very few
 *	registers have to be saved to pull this off.  d0 holds prev and we
 *	want to preserve it.  prev_switch is a pointer to the task->thread
 *	structure; this is where we will save the register state.  next_switch
 *	is a pointer to the next task's thread structure that holds the
 *	registers.
 */
asmlinkage void *__switch_to(struct task_struct *prev,
			     struct thread_struct *prev_switch,
			     struct thread_struct *next_switch)
	__attribute__((naked));