/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/ipc.h>
#include <asm/page.h>
#include <asm/unistd.h>

/*
 * sys_pipe() uses the normal C calling convention for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long __user * fildes)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	return error;
}

/* common code for old and new mmaps */
static inline long do_mmap2(
	unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file * file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}

/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which historically could not
 * handle more than 4 system call parameters, so these system calls
 * use a memory block for parameter passing.
 */

struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};
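
/*
 * Illustrative sketch (not from this file) of how user space is
 * expected to invoke old_mmap: all six arguments are packed into one
 * block whose address is the only real syscall parameter.  The raw
 * syscall() form and __NR_mmap number are assumptions for
 * illustration:
 *
 *	struct mmap_arg_struct args = {
 *		.addr = 0, .len = 8192,
 *		.prot = PROT_READ | PROT_WRITE,
 *		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
 *		.fd = -1, .offset = 0,
 *	};
 *	void *p = (void *)syscall(__NR_mmap, &args);
 *
 * Note that .offset is in bytes here, while sys_mmap2 takes a page
 * offset.
 */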

asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
	struct mmap_arg_struct a;
	int error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;

	error = -EINVAL;
	if (a.offset & ~PAGE_MASK)
		goto out;

	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
	return error;
}

struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};
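
/*
 * This layout is believed to match the i386 sel_arg_struct that the
 * m68k port was cloned from (see the comment above mmap_arg_struct);
 * old_select simply unpacks the block and hands off to sys_select().
 */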

asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	/* sys_select() does the appropriate kernel locking */
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
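/*
 * The 'call' argument is split in two: the low 16 bits select the
 * operation (SEMOP, MSGSND, SHMAT, ...), while the high 16 bits carry
 * an ABI version.  Old binaries pass version 0, which for MSGRCV
 * selects the ipc_kludge layout handled below.
 */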
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -ENOSYS;
		}
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
				}
			default:
				return sys_msgrcv (first, ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, ptr, second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}

/* Convert the virtual (user) address VADDR to a physical address,
   yielding 0 if the page is unmapped.  */
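/*
 * A note on the asm below (an annotation based on the 68040 manuals,
 * not in the original): ptestr searches the translation tables for
 * VADDR and latches the result in the MMU status register; if the R
 * (resident) bit is set, the page-frame bits of %mmusr hold the
 * physical page address.
 */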
#define virt_to_phys_040(vaddr)						\
({									\
  unsigned long _mmusr, _paddr;						\
									\
  __asm__ __volatile__ (".chip 68040\n\t"				\
			"ptestr (%1)\n\t"				\
			"movec %%mmusr,%0\n\t"				\
			".chip 68k"					\
			: "=r" (_mmusr)					\
			: "a" (vaddr));					\
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;		\
  _paddr;								\
})

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  /* This nop is needed for some broken versions of the 68040.  */
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
        paddr += addr & ~(PAGE_MASK | 15);
        len = (len + (addr & 15) + 15) >> 4;
      } else {
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	for (;;)
	  {
	    if ((paddr = virt_to_phys_040(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
	len = (len + 15) >> 4;
      }
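      /* Number of 16-byte cache lines from paddr to the end of its page.  */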
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {
	      /*
	       * No need to page align here since it is done by
	       * virt_to_phys_040().
	       */
	      addr += PAGE_SIZE;
	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
	         boundary. */
	      for (;;)
		{
		  if ((paddr = virt_to_phys_040(addr)))
		    break;
		  if (len <= i)
		    return 0;
		  len -= i;
		  addr += PAGE_SIZE;
		}
	    }
	  else
	    paddr += 16;
	}
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_040(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}

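/*
 * Translate a user virtual address to a physical one on the '060.
 * plpar (the read variant of the 68060 PLPA "load physical address"
 * instruction) replaces the logical address in the address register
 * in place; an unmapped address is assumed to come back as 0 here,
 * with the access-error fixup taking care of the fault.
 */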
#define virt_to_phys_060(vaddr)				\
({							\
  unsigned long paddr;					\
  __asm__ __volatile__ (".chip 68060\n\t"		\
			"plpar (%0)\n\t"		\
			".chip 68k"			\
			: "=a" (paddr)			\
			: "0" (vaddr));			\
  (paddr);						\
})

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	for (;;)
	  {
	    if ((paddr = virt_to_phys_060(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
      }
      len = (len + 15) >> 4;
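      /* Number of 16-byte cache lines from paddr to the end of its page.  */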
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {
	      /*
	       * We just want to jump to the first cache line
	       * in the next page.
	       */
	      addr += PAGE_SIZE;
	      addr &= PAGE_MASK;

	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
	         boundary. */
	      for (;;)
	        {
	          if ((paddr = virt_to_phys_060(addr)))
	            break;
	          if (len <= i)
	            return 0;
	          len -= i;
	          addr += PAGE_SIZE;
	        }
	    }
	  else
	    paddr += 16;
	}
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_060(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
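			/*
			 * Per the 680x0 manuals (an annotation, not in the
			 * original): setting CACR bit 2 clears the single
			 * I-cache entry addressed by %caar, and, on the
			 * '030, bit 10 clears the matching D-cache entry;
			 * the loop below walks %caar over each 4-byte
			 * longword in the range.
			 */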
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if only page granularity was requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
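			/*
			 * Again per the manuals: bit 3 (CI) invalidates the
			 * entire instruction cache and, on the '030, bit 11
			 * (CD) the entire data cache.
			 */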
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
	    /*
	     * 040 or 060: don't blindly trust 'scope'; someone could
	     * try to flush a few megabytes of memory one line at a time.
	     */

	    if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
	        scope = FLUSH_SCOPE_PAGE;
	    if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
	        scope = FLUSH_SCOPE_ALL;
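	    /*
	     * The 3- and 10-page thresholds look like heuristic
	     * crossover points (no rationale is recorded here) beyond
	     * which a coarser flush is presumed cheaper than iterating
	     * line by line or page by page.
	     */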
	    if (CPU_IS_040) {
		ret = cache_flush_040 (addr, scope, cache, len);
	    } else if (CPU_IS_060) {
		ret = cache_flush_060 (addr, scope, cache, len);
	    }
	}
out:
	unlock_kernel();
	return ret;
}
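
/*
 * Illustrative user-space invocation (a sketch, not from this file;
 * the raw syscall() form and __NR_cacheflush number are assumptions):
 *
 *	char code[64];
 *	// ... write instructions into code[] ...
 *	syscall(__NR_cacheflush, (unsigned long)code,
 *		FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, sizeof(code));
 *
 * Flushing both caches is what self-modifying code needs after
 * patching instructions.
 */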

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}

/*
 * Do a system call from the kernel instead of calling sys_execve, so we
 * end up with proper pt_regs.
 */
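/*
 * The m68k syscall convention, as used below: the syscall number goes
 * in %d0, the first three arguments in %d1..%d3, and trap #0 enters
 * the kernel; the result (or a negative errno) comes back in %d0.
 */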
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap  #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}