/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/elf.h>
#include <asm/tlb.h>

asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code);

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
			  unsigned long prot, unsigned long flags,
			  unsigned long fd, unsigned long pgoff)
{
	/*
	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
	 * so we need to shift the argument down by 1; m68k mmap64(3)
	 * (in libc) expects the last argument of mmap2 in 4Kb units.
	 */
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)						\
({									\
	unsigned long _mmusr, _paddr;					\
									\
	__asm__ __volatile__ (".chip 68040\n\t"				\
			      "ptestr (%1)\n\t"				\
			      "movec %%mmusr,%0\n\t"			\
			      ".chip 68k"				\
			      : "=r" (_mmusr)				\
			      : "a" (vaddr));				\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
	_paddr;								\
})

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
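		/* cpushl pushes/invalidates a single 16-byte cache line, so
		   the length is converted to a count of lines below and the
		   physical address is re-derived with virt_to_phys_040()
		   whenever the loop crosses a page boundary.  */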
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;)
				{
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}

#define virt_to_phys_060(vaddr)					\
({								\
	unsigned long paddr;					\
	__asm__ __volatile__ (".chip 68060\n\t"			\
			      "plpar (%0)\n\t"			\
			      ".chip 68k"			\
			      : "=a" (paddr)			\
			      : "0" (vaddr));			\
	(paddr);						\
})

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
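		/* Unlike the '040 path, plpar translates the exact virtual
		   address handed to it, so the start address is rounded down
		   to a 16-byte cache line before the first lookup.  */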
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;

				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;)
				{
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			/* Set the "clear entry" bits in the CACR (bit 2 for
			   the instruction cache, bit 10 for the data cache)
			   and step the CAAR through the region one longword
			   at a time. */
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */

		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}

asmlinkage unsigned long sys_get_thread_area(void)
{
	return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
	current_thread_info()->tp_value = tp;
	return 0;
}

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		mem_value = *mem;
		if (mem_value == oldval)
			*mem = newval;

		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

	bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access; we can get here if
		   the memory we're trying to write to should be copied-on-write.
		   Make the kernel do the necessary page stuff, then re-iterate.
		   Simulate a write access fault to do that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If do_page_fault() fails, we don't have
				   anything meaningful to return.  There
				   should be a SIGSEGV pending for the
				   process.  */
				return 0xdeadbeef;
		}
	}
}

asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}
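/*
 * Illustrative usage sketch (not part of this file): how userspace code,
 * e.g. a JIT, might invoke the cacheflush syscall implemented above after
 * rewriting instructions.  The wrapper name flush_code and the use of the
 * raw syscall(2) interface are assumptions made for illustration only; the
 * scope and cache constants come from <asm/cachectl.h>, which this file
 * already uses.
 *
 *	#include <asm/cachectl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Flush both caches for a freshly written code region.
 *	static int flush_code(void *addr, unsigned long len)
 *	{
 *		return syscall(__NR_cacheflush, (unsigned long)addr,
 *			       FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, len);
 *	}
 */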