/*
 * linux/arch/m32r/kernel/sys_m32r.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/M32R platform.
 *
 * Taken from i386 version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>

#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
#include <asm/ipc.h>
#include <asm/syscall.h>
#include <asm/unistd.h>

/*
 * sys_tas() - test-and-set
 */
asmlinkage int sys_tas(int __user *addr)
{
	int oldval;

	if (!access_ok(VERIFY_WRITE, addr, sizeof (int)))
		return -EFAULT;

	/* atomic operation:
	 *   oldval = *addr; *addr = 1;
	 */
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r4", "%1")
		"	.fillinsn\n"
		"1:\n"
		"	lock	%0, @%1	    ->	unlock	%2, @%1\n"
		"2:\n"
		/* NOTE:
		 *   The m32r processor can accept interrupts only
		 *   at the 32-bit instruction boundary.
		 *   So, in the above code, the "unlock" instruction
		 *   can be executed continuously after the "lock"
		 *   instruction execution without any interruptions.
		 */
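		/* Fault handling: if the "lock" access at label 1 faults,
		 * the __ex_table entry below redirects execution to the
		 * fixup code at label 3, which loads -EFAULT into the
		 * result register and jumps back to label 2.
		 */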
		".section .fixup,\"ax\"\n"
		"	.balign 4\n"
		"3:	ldi	%0, #%3\n"
		"	seth	r14, #high(2b)\n"
		"	or3	r14, r14, #low(2b)\n"
		"	jmp	r14\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 1b,3b\n"
		".previous\n"
		: "=&r" (oldval)
		: "r" (addr), "r" (1), "i"(-EFAULT)
		: "r14", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		  , "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
	);

	return oldval;
}
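
/*
 * Illustrative user-space sketch (not part of this file): sys_tas() can
 * serve as the test-and-set primitive behind a trivial spinlock.  The
 * syscall number name __NR_tas and the headers are assumptions made for
 * the example, and memory barriers are omitted for brevity.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static int lock_word;
 *
 *	static void my_lock(void)
 *	{
 *		// sys_tas() atomically stores 1 and returns the old value,
 *		// so a return of 0 means the lock was acquired.
 *		while (syscall(__NR_tas, &lock_word) != 0)
 *			;	// spin until the holder stores 0
 *	}
 *
 *	static void my_unlock(void)
 *	{
 *		lock_word = 0;
 *	}
 */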

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
asmlinkage int
sys_pipe(unsigned long r0, unsigned long r1, unsigned long r2,
	unsigned long r3, unsigned long r4, unsigned long r5,
	unsigned long r6, struct pt_regs regs)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user((void __user *)r0, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	return error;
}
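
/*
 * Illustrative user-space sketch (assumption: the ordinary pipe() wrapper
 * from <unistd.h>, which ends up passing the address of the two-element
 * array as the first syscall argument, the r0 value copied to above):
 *
 *	int fds[2];
 *
 *	if (pipe(fds) == 0) {
 *		// fds[0] is the read end, fds[1] is the write end
 *		write(fds[1], "x", 1);
 *	}
 */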

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file *file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc(uint call, int first, int second,
		       int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	switch (call) {
	case SEMOP:
		return sys_semtimedop(first, (struct sembuf __user *)ptr,
				      second, NULL);
	case SEMTIMEDOP:
		return sys_semtimedop(first, (struct sembuf __user *)ptr,
				      second, (const struct timespec __user *)fifth);
	case SEMGET:
		return sys_semget (first, second, third);
	case SEMCTL: {
		union semun fourth;
		if (!ptr)
			return -EINVAL;
		if (get_user(fourth.__pad, (void __user * __user *) ptr))
			return -EFAULT;
		return sys_semctl (first, second, third, fourth);
		}

	case MSGSND:
		return sys_msgsnd (first, (struct msgbuf __user *) ptr,
				   second, third);
	case MSGRCV:
		switch (version) {
		case 0: {
			struct ipc_kludge tmp;
			if (!ptr)
				return -EINVAL;

			if (copy_from_user(&tmp,
					   (struct ipc_kludge __user *) ptr,
					   sizeof (tmp)))
				return -EFAULT;
			return sys_msgrcv (first, tmp.msgp, second,
					   tmp.msgtyp, third);
			}
		default:
			return sys_msgrcv (first,
					   (struct msgbuf __user *) ptr,
					   second, fifth, third);
		}
	case MSGGET:
		return sys_msgget ((key_t) first, second);
	case MSGCTL:
		return sys_msgctl (first, second,
				   (struct msqid_ds __user *) ptr);
	case SHMAT: {
		ulong raddr;

		if (!access_ok(VERIFY_WRITE, (ulong __user *) third,
				      sizeof(ulong)))
			return -EFAULT;
		ret = do_shmat (first, (char __user *) ptr, second, &raddr);
		if (ret)
			return ret;
		return put_user (raddr, (ulong __user *) third);
		}
	case SHMDT:
		return sys_shmdt ((char __user *)ptr);
	case SHMGET:
		return sys_shmget (first, second, third);
	case SHMCTL:
		return sys_shmctl (first, second,
				   (struct shmid_ds __user *) ptr);
	default:
		return -ENOSYS;
	}
}
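
/*
 * Illustrative sketch of how a C library wrapper might route a SysV IPC
 * request through this multiplexer (the syscall number name __NR_ipc is
 * an assumption for the example, and real libc wrappers differ in detail;
 * MY_SEMGET below is a local stand-in for the SEMGET value tested above):
 *
 *	#include <unistd.h>
 *	#include <sys/types.h>
 *	#include <sys/syscall.h>
 *
 *	#define MY_SEMGET	2	// same value as SEMGET in the switch above
 *
 *	int my_semget(key_t key, int nsems, int semflg)
 *	{
 *		// call == SEMGET selects the sys_semget() branch above
 *		return syscall(__NR_ipc, MY_SEMGET, key, nsems, semflg, NULL, 0L);
 *	}
 */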

asmlinkage int sys_uname(struct old_utsname __user * name)
{
	int err;
	if (!name)
		return -EFAULT;
	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof (*name));
	up_read(&uts_sem);
	return err ? -EFAULT : 0;
}

asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
{
	/* This should flush more selectively ...  */
	_flush_cache_all();
	return 0;
}

asmlinkage int sys_cachectl(char *addr, int nbytes, int op)
{
	/* Not implemented yet. */
	return -ENOSYS;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
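	/*
	 * m32r syscall convention, as used below: r7 carries the syscall
	 * number, r0 the first argument (and, after the trap, the return
	 * value), r1 the second argument and r2 the third.
	 */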
	register long __scno __asm__ ("r7") = __NR_execve;
	register long __arg3 __asm__ ("r2") = (long)(envp);
	register long __arg2 __asm__ ("r1") = (long)(argv);
	register long __res __asm__ ("r0") = (long)(filename);
	__asm__ __volatile__ (
		"trap #" SYSCALL_VECTOR "|| nop"
		: "=r" (__res)
		: "r" (__scno), "0" (__res), "r" (__arg2),
			"r" (__arg3)
		: "memory");
	return __res;
}