/*
 *  linux/kernel/compat.c
 *
 *  Kernel compatibility routines for e.g. 32 bit syscall support
 *  on 64 bit kernels.
 *
 *  Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/signal.h>
#include <linux/sched.h>	/* for MAX_SCHEDULE_TIMEOUT */
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/security.h>
#include <linux/timex.h>
#include <linux/migrate.h>
#include <linux/posix-timers.h>
#include <linux/times.h>
#include <linux/ptrace.h>
#include <linux/gfp.h>

#include <asm/uaccess.h>

/*
 * Note that the native side is already converted to a timespec, because
 * that's what we want anyway.
 */
static int compat_get_timeval(struct timespec *o,
		struct compat_timeval __user *i)
{
	long usec;

	if (get_user(o->tv_sec, &i->tv_sec) ||
	    get_user(usec, &i->tv_usec))
		return -EFAULT;
	o->tv_nsec = usec * 1000;
	return 0;
}

static int compat_put_timeval(struct compat_timeval __user *o,
		struct timeval *i)
{
	return (put_user(i->tv_sec, &o->tv_sec) ||
		put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0;
}

asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
		struct timezone __user *tz)
{
	if (tv) {
		struct timeval ktv;
		do_gettimeofday(&ktv);
		if (compat_put_timeval(tv, &ktv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}

	return 0;
}

asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
		struct timezone __user *tz)
{
	struct timespec kts;
	struct timezone ktz;

	if (tv) {
		if (compat_get_timeval(&kts, tv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_from_user(&ktz, tz, sizeof(ktz)))
			return -EFAULT;
	}

	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}

int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
{
	return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
			__get_user(ts->tv_sec, &cts->tv_sec) ||
			__get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}

int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
{
	return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
			__put_user(ts->tv_sec, &cts->tv_sec) ||
			__put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}

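/*
 * Several wrappers below temporarily lift the address limit with
 * set_fs(KERNEL_DS) so that a native helper expecting a __user pointer can
 * write into a timespec/itimerspec/etc. that actually lives on the kernel
 * stack; the result is then converted to the 32-bit compat layout by hand.
 */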
static long compat_nanosleep_restart(struct restart_block *restart)
{
	struct compat_timespec __user *rmtp;
	struct timespec rmt;
	mm_segment_t oldfs;
	long ret;

	restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = hrtimer_nanosleep_restart(restart);
	set_fs(oldfs);

	if (ret) {
		rmtp = restart->nanosleep.compat_rmtp;

		if (rmtp && put_compat_timespec(&rmt, rmtp))
			return -EFAULT;
	}

	return ret;
}

asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
				     struct compat_timespec __user *rmtp)
{
	struct timespec tu, rmt;
	mm_segment_t oldfs;
	long ret;

	if (get_compat_timespec(&tu, rqtp))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = hrtimer_nanosleep(&tu,
				rmtp ? (struct timespec __user *)&rmt : NULL,
				HRTIMER_MODE_REL, CLOCK_MONOTONIC);
	set_fs(oldfs);

	if (ret) {
		struct restart_block *restart
			= &current_thread_info()->restart_block;

		restart->fn = compat_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;

		if (rmtp && put_compat_timespec(&rmt, rmtp))
			return -EFAULT;
	}

	return ret;
}

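/*
 * The bitwise OR below is deliberate: access_ok() has already validated the
 * whole structure, so every field is copied without short-circuiting and the
 * combined result is non-zero if any single access faulted.
 */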
static inline long get_compat_itimerval(struct itimerval *o,
		struct compat_itimerval __user *i)
{
	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
		(__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
		 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
		 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
		 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}

static inline long put_compat_itimerval(struct compat_itimerval __user *o,
		struct itimerval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
		 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
		 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
		 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
}

asmlinkage long compat_sys_getitimer(int which,
		struct compat_itimerval __user *it)
{
	struct itimerval kit;
	int error;

	error = do_getitimer(which, &kit);
	if (!error && put_compat_itimerval(it, &kit))
		error = -EFAULT;
	return error;
}

asmlinkage long compat_sys_setitimer(int which,
		struct compat_itimerval __user *in,
		struct compat_itimerval __user *out)
{
	struct itimerval kin, kout;
	int error;

	if (in) {
		if (get_compat_itimerval(&kin, in))
			return -EFAULT;
	} else
		memset(&kin, 0, sizeof(kin));

	error = do_setitimer(which, &kin, out ? &kout : NULL);
	if (error || !out)
		return error;
	if (put_compat_itimerval(out, &kout))
		return -EFAULT;
	return 0;
}

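/*
 * Convert a native clock_t to the 32-bit compat representation by going back
 * through jiffies, which is the unit both conversion helpers are defined
 * against.
 */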
static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}

/*
 * Assumption: old_sigset_t and compat_old_sigset_t are both
 * types that can be passed to put_user()/get_user().
 */

asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
{
	old_sigset_t s;
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_sigpending((old_sigset_t __user *) &s);
	set_fs(old_fs);
	if (ret == 0)
		ret = put_user(s, set);
	return ret;
}

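/*
 * The single buffer 's' is passed both as the new mask and as the place to
 * receive the old one; sys_sigprocmask() reads the new set into a local copy
 * before it writes the previous set back, so the aliasing is safe here.
 */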
asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
		compat_old_sigset_t __user *oset)
{
	old_sigset_t s;
	long ret;
	mm_segment_t old_fs;

	if (set && get_user(s, set))
		return -EFAULT;
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_sigprocmask(how,
			      set ? (old_sigset_t __user *) &s : NULL,
			      oset ? (old_sigset_t __user *) &s : NULL);
	set_fs(old_fs);
	if (ret == 0)
		if (oset)
			ret = put_user(s, oset);
	return ret;
}

asmlinkage long compat_sys_setrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;

	if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
	    __get_user(r.rlim_cur, &rlim->rlim_cur) ||
	    __get_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;

	if (r.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	if (r.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	return do_prlimit(current, resource, &r, NULL);
}

#ifdef COMPAT_RLIM_OLD_INFINITY

asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_old_getrlimit(resource, &r);
	set_fs(old_fs);

	if (!ret) {
		if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY)
			r.rlim_cur = COMPAT_RLIM_INFINITY;
		if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY)
			r.rlim_max = COMPAT_RLIM_INFINITY;

		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
		    __put_user(r.rlim_max, &rlim->rlim_max))
			return -EFAULT;
	}
	return ret;
}

#endif

asmlinkage long compat_sys_getrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r.rlim_cur = COMPAT_RLIM_INFINITY;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r.rlim_max = COMPAT_RLIM_INFINITY;

		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
		    __put_user(r.rlim_max, &rlim->rlim_max))
			return -EFAULT;
	}
	return ret;
}

int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
{
	if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) ||
	    __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) ||
	    __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) ||
	    __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) ||
	    __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) ||
	    __put_user(r->ru_maxrss, &ru->ru_maxrss) ||
	    __put_user(r->ru_ixrss, &ru->ru_ixrss) ||
	    __put_user(r->ru_idrss, &ru->ru_idrss) ||
	    __put_user(r->ru_isrss, &ru->ru_isrss) ||
	    __put_user(r->ru_minflt, &ru->ru_minflt) ||
	    __put_user(r->ru_majflt, &ru->ru_majflt) ||
	    __put_user(r->ru_nswap, &ru->ru_nswap) ||
	    __put_user(r->ru_inblock, &ru->ru_inblock) ||
	    __put_user(r->ru_oublock, &ru->ru_oublock) ||
	    __put_user(r->ru_msgsnd, &ru->ru_msgsnd) ||
	    __put_user(r->ru_msgrcv, &ru->ru_msgrcv) ||
	    __put_user(r->ru_nsignals, &ru->ru_nsignals) ||
	    __put_user(r->ru_nvcsw, &ru->ru_nvcsw) ||
	    __put_user(r->ru_nivcsw, &ru->ru_nivcsw))
		return -EFAULT;
	return 0;
}

asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
{
	struct rusage r;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_getrusage(who, (struct rusage __user *) &r);
	set_fs(old_fs);

	if (ret)
		return ret;

	if (put_compat_rusage(&r, ru))
		return -EFAULT;

	return 0;
}

asmlinkage long
compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
	struct compat_rusage __user *ru)
{
	if (!ru) {
		return sys_wait4(pid, stat_addr, options, NULL);
	} else {
		struct rusage r;
		int ret;
		unsigned int status;
		mm_segment_t old_fs = get_fs();

		set_fs(KERNEL_DS);
		ret = sys_wait4(pid,
				(stat_addr ?
				 (unsigned int __user *) &status : NULL),
				options, (struct rusage __user *) &r);
		set_fs(old_fs);

		if (ret > 0) {
			if (put_compat_rusage(&r, ru))
				return -EFAULT;
			if (stat_addr && put_user(status, stat_addr))
				return -EFAULT;
		}
		return ret;
	}
}

asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
		struct compat_siginfo __user *uinfo, int options,
		struct compat_rusage __user *uru)
{
	siginfo_t info;
	struct rusage ru;
	long ret;
	mm_segment_t old_fs = get_fs();

	memset(&info, 0, sizeof(info));

	set_fs(KERNEL_DS);
	ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
			 uru ? (struct rusage __user *)&ru : NULL);
	set_fs(old_fs);

	if ((ret < 0) || (info.si_signo == 0))
		return ret;

	if (uru) {
		ret = put_compat_rusage(&ru, uru);
		if (ret)
			return ret;
	}

	BUG_ON(info.si_code & __SI_MASK);
	info.si_code |= __SI_CHLD;
	return copy_siginfo_to_user32(uinfo, &info);
}

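/*
 * Copy in a CPU affinity mask whose byte length is chosen by 32-bit
 * userspace: zero-fill the kernel cpumask when the supplied buffer is
 * shorter, clamp the length when it is longer, and hand the size in bits
 * (len * 8) to compat_get_bitmap().
 */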
static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
				    unsigned len, struct cpumask *new_mask)
{
	unsigned long *k;

	if (len < cpumask_size())
		memset(new_mask, 0, cpumask_size());
	else if (len > cpumask_size())
		len = cpumask_size();

	k = cpumask_bits(new_mask);
	return compat_get_bitmap(k, user_mask_ptr, len * 8);
}

asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
					     unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval)
		goto out;

	retval = sched_setaffinity(pid, new_mask);
out:
	free_cpumask_var(new_mask);
	return retval;
}

asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(compat_ulong_t)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());

		if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

int get_compat_itimerspec(struct itimerspec *dst,
			  const struct compat_itimerspec __user *src)
{
	if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
	    get_compat_timespec(&dst->it_value, &src->it_value))
		return -EFAULT;
	return 0;
}

int put_compat_itimerspec(struct compat_itimerspec __user *dst,
			  const struct itimerspec *src)
{
	if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
	    put_compat_timespec(&src->it_value, &dst->it_value))
		return -EFAULT;
	return 0;
}

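/*
 * Rather than dropping to KERNEL_DS, this wrapper rewrites the compat
 * sigevent into a native one in scratch user space obtained from
 * compat_alloc_user_space(), so sys_timer_create() can copy it in through
 * its normal __user path.
 */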
long compat_sys_timer_create(clockid_t which_clock,
			struct compat_sigevent __user *timer_event_spec,
			timer_t __user *created_timer_id)
{
	struct sigevent __user *event = NULL;

	if (timer_event_spec) {
		struct sigevent kevent;

		event = compat_alloc_user_space(sizeof(*event));
		if (get_compat_sigevent(&kevent, timer_event_spec) ||
		    copy_to_user(event, &kevent, sizeof(*event)))
			return -EFAULT;
	}

	return sys_timer_create(which_clock, event, created_timer_id);
}

long compat_sys_timer_settime(timer_t timer_id, int flags,
			  struct compat_itimerspec __user *new,
			  struct compat_itimerspec __user *old)
{
	long err;
	mm_segment_t oldfs;
	struct itimerspec newts, oldts;

	if (!new)
		return -EINVAL;
	if (get_compat_itimerspec(&newts, new))
		return -EFAULT;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_settime(timer_id, flags,
				(struct itimerspec __user *) &newts,
				(struct itimerspec __user *) &oldts);
	set_fs(oldfs);
	if (!err && old && put_compat_itimerspec(old, &oldts))
		return -EFAULT;
	return err;
}

long compat_sys_timer_gettime(timer_t timer_id,
		struct compat_itimerspec __user *setting)
{
	long err;
	mm_segment_t oldfs;
	struct itimerspec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_gettime(timer_id,
				(struct itimerspec __user *) &ts);
	set_fs(oldfs);
	if (!err && put_compat_itimerspec(setting, &ts))
		return -EFAULT;
	return err;
}

long compat_sys_clock_settime(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	if (get_compat_timespec(&ts, tp))
		return -EFAULT;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_settime(which_clock,
				(struct timespec __user *) &ts);
	set_fs(oldfs);
	return err;
}

long compat_sys_clock_gettime(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_gettime(which_clock,
				(struct timespec __user *) &ts);
	set_fs(oldfs);
	if (!err && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}

long compat_sys_clock_getres(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_getres(which_clock,
			       (struct timespec __user *) &ts);
	set_fs(oldfs);
	if (!err && tp && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}

static long compat_clock_nanosleep_restart(struct restart_block *restart)
{
	long err;
	mm_segment_t oldfs;
	struct timespec tu;
	struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;

	restart->nanosleep.rmtp = (struct timespec __user *) &tu;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = clock_nanosleep_restart(restart);
	set_fs(oldfs);

	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
	    put_compat_timespec(&tu, rmtp))
		return -EFAULT;

	if (err == -ERESTART_RESTARTBLOCK) {
		restart->fn = compat_clock_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;
	}
	return err;
}

long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
			    struct compat_timespec __user *rqtp,
			    struct compat_timespec __user *rmtp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec in, out;
	struct restart_block *restart;

	if (get_compat_timespec(&in, rqtp))
		return -EFAULT;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_nanosleep(which_clock, flags,
				  (struct timespec __user *) &in,
				  (struct timespec __user *) &out);
	set_fs(oldfs);

	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
	    put_compat_timespec(&out, rmtp))
		return -EFAULT;

	if (err == -ERESTART_RESTARTBLOCK) {
		restart = &current_thread_info()->restart_block;
		restart->fn = compat_clock_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;
	}
	return err;
}

/*
 * We currently only need the following fields from the sigevent
 * structure: sigev_value, sigev_signo, sigev_notify and (sometimes
 * sigev_notify_thread_id).  The others are handled in user mode.
 * We also assume that copying sigev_value.sival_int is sufficient
 * to keep all the bits of sigev_value.sival_ptr intact.
 */
int get_compat_sigevent(struct sigevent *event,
		const struct compat_sigevent __user *u_event)
{
	memset(event, 0, sizeof(*event));
	return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) ||
		__get_user(event->sigev_value.sival_int,
			&u_event->sigev_value.sival_int) ||
		__get_user(event->sigev_signo, &u_event->sigev_signo) ||
		__get_user(event->sigev_notify, &u_event->sigev_notify) ||
		__get_user(event->sigev_notify_thread_id,
			&u_event->sigev_notify_thread_id))
		? -EFAULT : 0;
}

long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
		       unsigned long bitmap_size)
{
	int i, j;
	unsigned long m;
	compat_ulong_t um;
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

	if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
		return -EFAULT;

	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
		m = 0;

		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
			/*
			 * We don't want to read past the end of the userspace
			 * bitmap. We must however ensure the end of the
			 * kernel bitmap is zeroed.
			 */
			if (nr_compat_longs-- > 0) {
				if (__get_user(um, umask))
					return -EFAULT;
			} else {
				um = 0;
			}

			umask++;
			m |= (long)um << (j * BITS_PER_COMPAT_LONG);
		}
		*mask++ = m;
	}

	return 0;
}

long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
		       unsigned long bitmap_size)
{
	int i, j;
	unsigned long m;
	compat_ulong_t um;
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

	if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
		return -EFAULT;

	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
		m = *mask++;

		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
			um = m;

			/*
			 * We don't want to write past the end of the userspace
			 * bitmap.
			 */
			if (nr_compat_longs-- > 0) {
				if (__put_user(um, umask))
					return -EFAULT;
			}

			umask++;
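			/*
			 * The shift is done in two halves so the shift count
			 * stays below the width of 'm'; a single combined
			 * shift would be undefined if compat_ulong_t ever had
			 * the same width as unsigned long.
			 */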
			m >>= 4*sizeof(um);
			m >>= 4*sizeof(um);
		}
	}

	return 0;
}

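/*
 * Each case below deliberately falls through to the ones beneath it, so
 * that all _NSIG_WORDS native words are rebuilt from pairs of 32-bit compat
 * words.
 */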
void
sigset_from_compat(sigset_t *set, compat_sigset_t *compat)
{
	switch (_NSIG_WORDS) {
	case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32);
	case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32);
	case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32);
	case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32);
	}
}

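/*
 * Compat version of rt_sigtimedwait(): convert the 32-bit sigset and
 * timeout, then open-code the wait so the resulting siginfo can be copied
 * back in the 32-bit layout.
 */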
asmlinkage long
compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
		struct compat_siginfo __user *uinfo,
		struct compat_timespec __user *uts, compat_size_t sigsetsize)
{
	compat_sigset_t s32;
	sigset_t s;
	int sig;
	struct timespec t;
	siginfo_t info;
	long ret, timeout = 0;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&s, &s32);
	sigdelsetmask(&s, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&s);

	if (uts) {
		if (get_compat_timespec(&t, uts))
			return -EFAULT;
		if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0
				|| t.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &s, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = timespec_to_jiffies(&t)
				+ (t.tv_sec || t.tv_nsec);
		if (timeout) {
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &s);

			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &s, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user32(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = timeout ? -EINTR : -EAGAIN;
	}
	return ret;
}

asmlinkage long
compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
			     struct compat_siginfo __user *uinfo)
{
	siginfo_t info;

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef __ARCH_WANT_COMPAT_SYS_TIME

/* compat_time_t is a 32 bit "long" and needs to get converted. */

asmlinkage long compat_sys_time(compat_time_t __user *tloc)
{
	compat_time_t i;
	struct timeval tv;

	do_gettimeofday(&tv);
	i = tv.tv_sec;

	if (tloc) {
		if (put_user(i, tloc))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return i;
}

asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
{
	struct timespec tv;
	int err;

	if (get_user(tv.tv_sec, tptr))
		return -EFAULT;

	tv.tv_nsec = 0;

	err = security_settime(&tv, NULL);
	if (err)
		return err;

	do_settimeofday(&tv);
	return 0;
}

#endif /* __ARCH_WANT_COMPAT_SYS_TIME */

#ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
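/*
 * Classic sigsuspend() sequence: save the current mask, install the
 * caller's mask, sleep until a signal is delivered, and let the signal
 * return path restore the saved mask via set_restore_sigmask().
 */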
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize)
{
	sigset_t newset;
	compat_sigset_t newset32;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */

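/*
 * adjtimex() is translated field by field because struct timex and struct
 * compat_timex differ in member sizes.  Note that 'tai' is only written
 * back: it is an output from do_adjtimex(), not something userspace
 * supplies.
 */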
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
{
	struct timex txc;
	int ret;

	memset(&txc, 0, sizeof(struct timex));

	if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
			__get_user(txc.modes, &utp->modes) ||
			__get_user(txc.offset, &utp->offset) ||
			__get_user(txc.freq, &utp->freq) ||
			__get_user(txc.maxerror, &utp->maxerror) ||
			__get_user(txc.esterror, &utp->esterror) ||
			__get_user(txc.status, &utp->status) ||
			__get_user(txc.constant, &utp->constant) ||
			__get_user(txc.precision, &utp->precision) ||
			__get_user(txc.tolerance, &utp->tolerance) ||
			__get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
			__get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
			__get_user(txc.tick, &utp->tick) ||
			__get_user(txc.ppsfreq, &utp->ppsfreq) ||
			__get_user(txc.jitter, &utp->jitter) ||
			__get_user(txc.shift, &utp->shift) ||
			__get_user(txc.stabil, &utp->stabil) ||
			__get_user(txc.jitcnt, &utp->jitcnt) ||
			__get_user(txc.calcnt, &utp->calcnt) ||
			__get_user(txc.errcnt, &utp->errcnt) ||
			__get_user(txc.stbcnt, &utp->stbcnt))
		return -EFAULT;

	ret = do_adjtimex(&txc);

	if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
			__put_user(txc.modes, &utp->modes) ||
			__put_user(txc.offset, &utp->offset) ||
			__put_user(txc.freq, &utp->freq) ||
			__put_user(txc.maxerror, &utp->maxerror) ||
			__put_user(txc.esterror, &utp->esterror) ||
			__put_user(txc.status, &utp->status) ||
			__put_user(txc.constant, &utp->constant) ||
			__put_user(txc.precision, &utp->precision) ||
			__put_user(txc.tolerance, &utp->tolerance) ||
			__put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
			__put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
			__put_user(txc.tick, &utp->tick) ||
			__put_user(txc.ppsfreq, &utp->ppsfreq) ||
			__put_user(txc.jitter, &utp->jitter) ||
			__put_user(txc.shift, &utp->shift) ||
			__put_user(txc.stabil, &utp->stabil) ||
			__put_user(txc.jitcnt, &utp->jitcnt) ||
			__put_user(txc.calcnt, &utp->calcnt) ||
			__put_user(txc.errcnt, &utp->errcnt) ||
			__put_user(txc.stbcnt, &utp->stbcnt) ||
			__put_user(txc.tai, &utp->tai))
		ret = -EFAULT;

	return ret;
}

#ifdef CONFIG_NUMA
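/*
 * The page addresses arrive as 32-bit compat pointers; build a native array
 * of void __user * pointers in scratch user space and hand that to
 * sys_move_pages().
 */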
asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
		compat_uptr_t __user *pages32,
		const int __user *nodes,
		int __user *status,
		int flags)
{
	const void __user * __user *pages;
	int i;

	pages = compat_alloc_user_space(nr_pages * sizeof(void *));
	for (i = 0; i < nr_pages; i++) {
		compat_uptr_t p;

		if (get_user(p, pages32 + i) ||
			put_user(compat_ptr(p), pages + i))
			return -EFAULT;
	}
	return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
}

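/*
 * Convert the 32-bit node masks to native layout: read each one into a
 * temporary nodemask, then republish it in scratch user space so
 * sys_migrate_pages() can copy it in with its usual __user accessors.
 */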
asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
			compat_ulong_t maxnode,
			const compat_ulong_t __user *old_nodes,
			const compat_ulong_t __user *new_nodes)
{
	unsigned long __user *old = NULL;
	unsigned long __user *new = NULL;
	nodemask_t tmp_mask;
	unsigned long nr_bits;
	unsigned long size;

	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
	if (old_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
			return -EFAULT;
		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
		if (new_nodes)
			new = old + size / sizeof(unsigned long);
		if (copy_to_user(old, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	if (new_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
			return -EFAULT;
		if (new == NULL)
			new = compat_alloc_user_space(size);
		if (copy_to_user(new, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	return sys_migrate_pages(pid, nr_bits + 1, old, new);
}
#endif

struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

asmlinkage long
compat_sys_sysinfo(struct compat_sysinfo __user *info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/*
	 * Check to see if any memory value is too large for 32-bit and scale
	 * down if needed.
	 */
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
	    __put_user(s.uptime, &info->uptime) ||
	    __put_user(s.loads[0], &info->loads[0]) ||
	    __put_user(s.loads[1], &info->loads[1]) ||
	    __put_user(s.loads[2], &info->loads[2]) ||
	    __put_user(s.totalram, &info->totalram) ||
	    __put_user(s.freeram, &info->freeram) ||
	    __put_user(s.sharedram, &info->sharedram) ||
	    __put_user(s.bufferram, &info->bufferram) ||
	    __put_user(s.totalswap, &info->totalswap) ||
	    __put_user(s.freeswap, &info->freeswap) ||
	    __put_user(s.procs, &info->procs) ||
	    __put_user(s.totalhigh, &info->totalhigh) ||
	    __put_user(s.freehigh, &info->freehigh) ||
	    __put_user(s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}

/*
 * Allocate user-space memory for the duration of a single system call,
 * in order to marshall parameters inside a compat thunk.
 */
void __user *compat_alloc_user_space(unsigned long len)
{
	void __user *ptr;

	/* If len would occupy more than half of the entire compat space... */
	if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
		return NULL;

	ptr = arch_compat_alloc_user_space(len);

	if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
		return NULL;

	return ptr;
}
EXPORT_SYMBOL_GPL(compat_alloc_user_space);