// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/syscalls.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_MMU

#include <asm/tlb.h>

#include "../mm/fault.h"

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	/*
	 * m68k mmap64(3) in libc passes the last argument of mmap2 in
	 * 4Kb units, but ksys_mmap_pgoff() interprets it in PAGE_SIZE
	 * units.  That is wrong for sun3, where PAGE_SIZE is 8Kb and
	 * the argument would therefore need to be shifted down by 1.
	 */
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

/* Convert virtual (user) address VADDR to physical address PADDR */
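/*
 * ptestr searches the address-translation cache (and, on a miss, walks
 * the translation tables) for VADDR and latches the result in the MMU
 * status register; MMU_R_040 is the "resident" bit, so an unmapped
 * address yields a zero PADDR.
 */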
#define virt_to_phys_040(vaddr)						\
({									\
  unsigned long _mmusr, _paddr;						\
									\
  __asm__ __volatile__ (".chip 68040\n\t"				\
			"ptestr (%1)\n\t"				\
			"movec %%mmusr,%0\n\t"				\
			".chip 68k"					\
			: "=r" (_mmusr)					\
			: "a" (vaddr));					\
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;		\
  _paddr;								\
})

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  /* This nop is needed for some broken versions of the 68040.  */
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
        paddr += addr & ~(PAGE_MASK | 15);
        len = (len + (addr & 15) + 15) >> 4;
      } else {
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	for (;;)
	  {
	    if ((paddr = virt_to_phys_040(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
	len = (len + 15) >> 4;
      }
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
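      /* From here on, 'len' counts 16-byte cache lines and 'i' is the
	 number of lines left in the current page.  */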
      while (len--)
	{
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {
	      /*
	       * No need to page align here since it is done by
	       * virt_to_phys_040().
	       */
	      addr += PAGE_SIZE;
	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
	         boundary. */
	      for (;;)
		{
		  if ((paddr = virt_to_phys_040(addr)))
		    break;
		  if (len <= i)
		    return 0;
		  len -= i;
		  addr += PAGE_SIZE;
		}
	    }
	  else
	    paddr += 16;
	}
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_040(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}

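/* Convert virtual (user) address VADDR to physical address PADDR.
   On the 68060, plpar translates the logical address in the register
   in place; note that, unlike ptestr above, a failed translation
   apparently raises an access error exception rather than returning 0,
   which the XXX below presumably flags.  */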
#define virt_to_phys_060(vaddr)				\
({							\
  unsigned long paddr;					\
  __asm__ __volatile__ (".chip 68060\n\t"		\
			"plpar (%0)\n\t"		\
			".chip 68k"			\
			: "=a" (paddr)			\
			: "0" (vaddr));			\
  (paddr); /* XXX */					\
})

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	for (;;)
	  {
	    if ((paddr = virt_to_phys_060(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
      }
      len = (len + 15) >> 4;
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {
	      /*
	       * We just want to jump to the first cache line
	       * in the next page.
	       */
	      addr += PAGE_SIZE;
	      addr &= PAGE_MASK;

	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
	         boundary. */
	      for (;;)
	        {
	          if ((paddr = virt_to_phys_060(addr)))
	            break;
	          if (len <= i)
	            return 0;
	          len -= i;
	          addr += PAGE_SIZE;
	        }
	    }
	  else
	    paddr += 16;
	}
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;	/* Workaround for bug in some
				   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_060(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
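/*
 * Hypothetical userspace sketch: this is normally reached through the
 * raw syscall interface, e.g.
 *
 *	syscall(__NR_cacheflush, addr, FLUSH_SCOPE_LINE,
 *		FLUSH_CACHE_BOTH, len);
 *
 * with the FLUSH_* constants taken from <asm/cachectl.h>.
 */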
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	int ret = -EINVAL;

	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;

		mmap_read_lock(current->mm);
	} else {
		struct vm_area_struct *vma;

		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;

		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		mmap_read_lock(current->mm);
		vma = vma_lookup(current->mm, addr);
		if (!vma || addr + len > vma->vm_end)
			goto out_unlock;
	}

	if (CPU_IS_020_OR_030) {
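		/*
		 * 020/030 %cacr bits used below: bit 2 (CEI) clears the
		 * i-cache entry indexed by %caar and bit 10 (CED) the
		 * corresponding d-cache entry; bits 3 (CI) and 11 (CD)
		 * clear the whole i- and d-cache respectively.
		 */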
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out_unlock;
	} else {
	    /*
	     * 040 or 060: don't blindly trust 'scope', someone could
	     * try to flush a few megs of memory.
	     */

	    if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
	        scope = FLUSH_SCOPE_PAGE;
	    if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
	        scope = FLUSH_SCOPE_ALL;
	    if (CPU_IS_040) {
		ret = cache_flush_040 (addr, scope, cache, len);
	    } else if (CPU_IS_060) {
		ret = cache_flush_060 (addr, scope, cache, len);
	    }
	}
out_unlock:
	mmap_read_unlock(current->mm);
out:
	return ret;
}

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
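/* The C parameter order below (newval, oldval, d3, d4, d5, mem) thus
   mirrors the m68k syscall register layout D1, D2, D3, D4, D5, A0 as
   saved by the syscall entry code.  */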
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		mmap_read_lock(mm);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		p4d = p4d_offset(pgd, (unsigned long)mem);
		if (!p4d_present(*p4d))
			goto bad_access;
		pud = pud_offset(p4d, (unsigned long)mem);
		if (!pud_present(*pud))
			goto bad_access;
		pmd = pmd_offset(pud, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte)
			goto bad_access;
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		/*
		 * No need to check for EFAULT; we know that the page is
		 * present and writable.
		 */
		__get_user(mem_value, mem);
		if (mem_value == oldval)
			__put_user(newval, mem);

		pte_unmap_unlock(pte, ptl);
		mmap_read_unlock(mm);
		return mem_value;

	      bad_access:
		mmap_read_unlock(mm);
		/* This is not necessarily a bad access; we can get here if
		   the page we are trying to write to needs to be copied on
		   write.  Make the kernel do the necessary page handling,
		   then re-iterate.  Simulate a write access fault to do
		   that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If the do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process.  */
				return 0xdeadbeef;
		}
	}
}

#else

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	flush_cache_all();
	return 0;
}

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	struct mm_struct *mm = current->mm;
	unsigned long mem_value;

	mmap_read_lock(mm);

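	/* Without an MMU the user pointer can be dereferenced directly;
	   there is no page protection to trip over.  */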
	mem_value = *mem;
	if (mem_value == oldval)
		*mem = newval;

	mmap_read_unlock(mm);
	return mem_value;
}

#endif /* CONFIG_MMU */

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}

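/* m68k has no thread-pointer register, so the TLS pointer is simply
   cached in thread_info and read back by the two syscalls below.  */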
asmlinkage unsigned long sys_get_thread_area(void)
{
	return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
	current_thread_info()->tp_value = tp;
	return 0;
}

asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}
