/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/unistd.h>

#include "entry.h"
#include "systbls.h"

/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
	return PAGE_SIZE;
}

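/* On spitfire-style MMUs only the lowest and highest 2^43 bytes of the
 * virtual address space are usable; everything in between is a hardware
 * "VA hole".  The constants below widen that hole by 4GB on each side,
 * giving the range [VA_EXCLUDE_START, VA_EXCLUDE_END) that mappings
 * must avoid.
 */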
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end   = VA_EXCLUDE_END;

	if (unlikely(len >= va_exclude_start))
		return 1;

	if (unlikely((addr + len) < addr))
		return 1;

	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
		     ((addr + len) >= va_exclude_start &&
		      (addr + len) < va_exclude_end)))
		return 1;

	return 0;
}

/* Does start,end straddle the VA-space hole?  */
static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end   = VA_EXCLUDE_END;

	if (likely(start < va_exclude_start && end < va_exclude_start))
		return 0;

	if (likely(start >= va_exclude_end && end >= va_exclude_end))
		return 0;

	return 1;
}

/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

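/* SHMLBA on sparc64 spans all D-cache aliases of a page.  COLOUR_ALIGN
 * rounds addr up to an SHMLBA boundary and adds the colour bits of pgoff,
 * so that a given file offset always maps at the same D-cache colour;
 * COLOUR_ALIGN_DOWN does the same while rounding downwards.
 */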
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	return base + off;
}

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	if (base + off <= addr)
		return base + off;
	return base - off;
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;
	int do_color_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

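	/* Walk the VMA list looking for a hole of at least len bytes,
	 * skipping the excluded VA range.  If a search that began at the
	 * cached free_area_cache runs off the end of the address space,
	 * restart once from TASK_UNMAPPED_BASE before failing.
	 */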
full_search:
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_color_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base-len;
	if (do_color_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

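	/* Search top-down from mmap_base, retrying just below each
	 * conflicting vma.  The loop condition (len < vma->vm_start) keeps
	 * "vma->vm_start - len" from wrapping; when it fails we fall
	 * through to the bottom-up path below.
	 */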
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
		if (do_color_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

/* Try to align the mapping to as large a boundary as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return get_area(NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;

	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);

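	/* Over-allocate by (align_goal - PAGE_SIZE) so that whatever area we
	 * get back is guaranteed to contain an align_goal-aligned range of
	 * len bytes, then round up to that boundary.  If the larger request
	 * fails, retry with progressively smaller alignment goals.
	 */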
	do {
		addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

	/* Mapping is smaller than 64K or larger areas could not
	 * be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = get_area(NULL, orig_addr, len, pgoff, flags);

	return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);

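/* Choose the process mmap layout.  64-bit tasks, ADDR_COMPAT_LAYOUT
 * personalities, unlimited stack limits and sysctl_legacy_va_layout all
 * keep the classic bottom-up layout; otherwise 32-bit tasks map top-down
 * from just below STACK_TOP32 minus the (clamped) stack gap.
 */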
/* Essentially the same as PowerPC... */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;
	unsigned long gap;

	if (current->flags & PF_RANDOMIZE) {
		random_factor = get_random_int();
		if (test_thread_flag(TIF_32BIT))
			random_factor &= ((1 * 1024 * 1024) - 1);
		else
			random_factor = ((random_factor << PAGE_SHIFT) &
					 0xffffffffUL);
	}

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	gap = rlimit(RLIMIT_STACK);
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    gap == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;

		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
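/* The second descriptor is handed back in the saved register slot that
 * becomes the caller's %o1 (u_regs[UREG_I1]); the first is the normal
 * return value.  That is why this syscall needs the pt_regs pointer.
 */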
SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		goto out;
	regs->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */

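/* Dispatch order: semaphore calls first, then message queue calls, then
 * shared memory calls; anything else is -ENOSYS.
 */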
SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
		unsigned long, third, void __user *, ptr, long, fifth)
{
	long err;

	/* No need for backward compatibility. We can start fresh... */
	if (call <= SEMCTL) {
		switch (call) {
		case SEMOP:
			err = sys_semtimedop(first, ptr,
					     (unsigned)second, NULL);
			goto out;
		case SEMTIMEDOP:
			err = sys_semtimedop(first, ptr, (unsigned)second,
				(const struct timespec __user *)
					     (unsigned long) fifth);
			goto out;
		case SEMGET:
			err = sys_semget(first, (int)second, (int)third);
			goto out;
		case SEMCTL: {
			err = sys_semctl(first, second,
					 (int)third | IPC_64,
					 (union semun) ptr);
			goto out;
		}
		default:
			err = -ENOSYS;
			goto out;
		};
	}
	if (call <= MSGCTL) {
		switch (call) {
		case MSGSND:
			err = sys_msgsnd(first, ptr, (size_t)second,
					 (int)third);
			goto out;
		case MSGRCV:
			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
					 (int)third);
			goto out;
		case MSGGET:
			err = sys_msgget((key_t)first, (int)second);
			goto out;
		case MSGCTL:
			err = sys_msgctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		};
	}
	if (call <= SHMCTL) {
		switch (call) {
		case SHMAT: {
			ulong raddr;
			err = do_shmat(first, ptr, (int)second, &raddr);
			if (!err) {
				if (put_user(raddr,
					     (ulong __user *) third))
					err = -EFAULT;
			}
			goto out;
		}
		case SHMDT:
			err = sys_shmdt(ptr);
			goto out;
		case SHMGET:
			err = sys_shmget(first, (size_t)second, (int)third);
			goto out;
		case SHMCTL:
			err = sys_shmctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		};
	} else {
		err = -ENOSYS;
	}
out:
	return err;
}

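/* Hide PER_LINUX32 from compat tasks: a request for PER_LINUX keeps the
 * PER_LINUX32 personality, and a previous PER_LINUX32 is reported back
 * as PER_LINUX.
 */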
SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
	int ret;

	if (current->personality == PER_LINUX32 &&
	    personality == PER_LINUX)
		personality = PER_LINUX32;
	ret = sys_personality(personality);
	if (ret == PER_LINUX32)
		ret = PER_LINUX;

	return ret;
}

int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (len >= STACK_TOP32)
			return -EINVAL;

		if (addr > STACK_TOP32 - len)
			return -EINVAL;
	} else {
		if (len >= VA_EXCLUDE_START)
			return -EINVAL;

		if (invalid_64bit_range(addr, len))
			return -EINVAL;
	}

	return 0;
}

/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags, unsigned long, fd,
		unsigned long, off)
{
	unsigned long retval = -EINVAL;

	if ((off + PAGE_ALIGN(len)) < off)
		goto out;
	if (off & ~PAGE_MASK)
		goto out;
	retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return retval;
}

SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
	long ret;

	if (invalid_64bit_range(addr, len))
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	ret = do_munmap(current->mm, addr, len);
	up_write(&current->mm->mmap_sem);
	return ret;
}

extern unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr);

SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret = -EINVAL;

	if (test_thread_flag(TIF_32BIT))
		goto out;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}

/* We come here via sys_nis_syscall so it can set up the regs argument.  */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
	static int count;

	/* Don't make the system unusable if something gets stuck. */
	if (count++ > 5)
		return -ENOSYS;

	printk("Unimplemented SPARC system call %ld\n", regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs(regs);
#endif

	return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

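/* Software breakpoint trap: truncate the PCs for 32-bit tasks, then
 * deliver SIGTRAP with si_code TRAP_BRKPT to the current process.
 */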
asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	siginfo_t info;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
	printk("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}

extern void check_pending(int signum);

SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
	int nlen, err;

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out;

	err = -EFAULT;
	if (!copy_to_user(name, utsname()->domainname, nlen))
		err = 0;

out:
	up_read(&uts_sem);
	return err;
}

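/* User traps (see <asm/utrap.h>): utraps[0] is a share count, since the
 * table can be shared after process duplication; entries
 * utraps[1..UT_TRAP_INSTRUCTION_31] hold the handler addresses.  A shared
 * table is copied before it is modified.
 */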
SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
		utrap_handler_t, new_p, utrap_handler_t, new_d,
		utrap_handler_t __user *, old_p,
		utrap_handler_t __user *, old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
	if (!current_thread_info()->utraps) {
		current_thread_info()->utraps =
			kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			unsigned long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
					GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}

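/* Set the per-thread memory model (0 = TSO, 1 = PSO, 2 = RMO) by writing
 * it into the two MM bits of %tstate (TSTATE_MM, bits 15:14).
 */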
asmlinkage long sparc_memory_ordering(unsigned long model,
				      struct pt_regs *regs)
{
	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}

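/* sparc passes the signal trampoline ("restorer") as an explicit syscall
 * argument rather than inside struct sigaction, so it is stashed in
 * ka_restorer before do_sigaction() runs.
 */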
SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
		struct sigaction __user *, oact, void __user *, restorer,
		size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
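/* "t 0x6d" is the 64-bit Linux syscall trap on sparc64.  On return %o0
 * holds the result, or a positive errno with the carry flag set; the
 * sub/movcc pair below converts that into the usual negative-errno value.
 */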
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	long __res;
	register long __g1 __asm__ ("g1") = __NR_execve;
	register long __o0 __asm__ ("o0") = (long)(filename);
	register long __o1 __asm__ ("o1") = (long)(argv);
	register long __o2 __asm__ ("o2") = (long)(envp);
	asm volatile ("t 0x6d\n\t"
		      "sub %%g0, %%o0, %0\n\t"
		      "movcc %%xcc, %%o0, %0\n\t"
		      : "=r" (__res), "=&r" (__o0)
		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
		      : "cc");
	return __res;
}