/*
 * linux/kernel/posix-timers.c
 *
 *
 * 2002-10-15  Posix Clocks & timers
 *                           by George Anzinger george@mvista.com
 *
 *			     Copyright (C) 2002, 2003 by MontaVista Software.
 *
 * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
 *			     Copyright (C) 2004 Boris Hu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
 */

/* These are all the functions necessary to implement
 * POSIX clocks & timers
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/idr.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/module.h>

/*
 * Management arrays for POSIX timers.  Timers are kept in slab memory.
 * Timer ids are allocated by an external routine that keeps track of the
 * id and the timer.  The external interface is:
 *
 * void *idr_find(struct idr *idp, int id);	      to find timer_id <id>
 * int idr_get_new(struct idr *idp, void *ptr,
 *		   int *id);			      to get a new id and
 *						      relate it to <ptr>
 * void idr_remove(struct idr *idp, int id);	      to release <id>
 * void idr_init(struct idr *idp);		      to initialize <idp>
 *						      which we supply.
 * idr_get_new() *may* call slab for more memory so it must not be
 * called under a spin lock.  Likewise idr_remove() may release memory
 * (but it may be ok to do this under a lock...).
 * idr_find() is just a memory look up and is quite fast.  A NULL return
 * indicates that the requested id does not exist.
 */
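
/*
 * For reference, the id allocation pattern used below in
 * sys_timer_create() looks roughly like this (sketch only, error
 * paths trimmed):
 *
 *	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL)))
 *		return -EAGAIN;			(may sleep, no lock held)
 *	spin_lock_irq(&idr_lock);
 *	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
 *	spin_unlock_irq(&idr_lock);
 *	if (error == -EAGAIN)
 *		... retry from idr_pre_get() ...
 */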

/*
 * Let's keep our timers in a slab cache :-)
 */
static struct kmem_cache *posix_timers_cache;
static struct idr posix_timers_id;
static DEFINE_SPINLOCK(idr_lock);

/*
 * We assume that the new SIGEV_THREAD_ID flag shares no bits with the
 * other SIGEV values.  The compile-time check below fails the build if
 * this assumption does not hold.
 */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bits with other SIGEV values!"
#endif
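
/*
 * A quick sanity check of that assumption (values as defined in
 * asm-generic/siginfo.h): SIGEV_SIGNAL = 0, SIGEV_NONE = 1,
 * SIGEV_THREAD = 2 and SIGEV_THREAD_ID = 4, so
 *
 *	SIGEV_THREAD_ID & ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD)
 *	  == 4 & ~3 == 4 == SIGEV_THREAD_ID
 *
 * and the #if above passes.
 */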

/*
 * The timer ID is turned into a timer address by idr_find().
 * Verifying a valid ID consists of:
 *
 * a) checking that idr_find() returns non-NULL.
 * b) checking that the timer id matches the one in the timer itself.
 * c) checking that the timer owner is in the caller's thread group.
 */
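
/*
 * A sketch of checks a) and c) as they appear in lock_timer() below:
 *
 *	timr = idr_find(&posix_timers_id, (int)timer_id);
 *	if (timr && timr->it_signal == current->signal)
 *		... the ID is valid, return the locked timer ...
 */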

/*
 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
 *	    to implement others.  This structure defines the various
 *	    clocks and allows the possibility of adding others.  We
 *	    provide an interface to add clocks to the table and expect
 *	    the "arch" code to add at least one clock that is high
 *	    resolution.  Here we define the standard CLOCK_REALTIME as a
 *	    1/HZ resolution clock.
 *
 * RESOLUTION: Clock resolution is used to round up timer and interval
 *	    times, NOT to report clock times, which are reported with as
 *	    much resolution as the system can muster.  In some cases this
 *	    resolution may depend on the underlying clock hardware and
 *	    may not be quantifiable until run time, and only then is the
 *	    necessary code written.  The standard says we should say
 *	    something about this issue in the documentation...
 *
 * FUNCTIONS: The CLOCKs structure defines possible functions to handle
 *	    various clock functions.  For clocks that use the standard
 *	    system timer code these entries should be NULL.  This will
 *	    allow dispatch without the overhead of indirect function
 *	    calls.  CLOCKs that depend on other sources (e.g. WWV or GPS)
 *	    must supply functions here, even if the function just returns
 *	    ENOSYS.  The standard POSIX timer management code assumes the
 *	    following: 1.) The k_itimer struct (sched.h) is used for the
 *	    timer.  2.) The list, it_lock, it_clock, it_id and it_pid
 *	    fields are not modified by timer code.
 *
 *	    At this time all functions EXCEPT clock_nanosleep can be
 *	    redirected by the CLOCKS structure.  clock_nanosleep is in
 *	    there, but the code ignores it.
 *
 * Permissions: It is assumed that the clock_settime() function defined
 *	    for each clock will take care of permission checks.  Some
 *	    clocks may be settable by any user (i.e. local process
 *	    clocks), others not.  Currently the only settable clock we
 *	    have is CLOCK_REALTIME and its high res counterpart, both of
 *	    which we beg off on and pass to do_sys_settimeofday().
 */
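
/*
 * For example, an arch or driver could hook its own clock into the
 * table like this (illustrative sketch only; MY_CLOCK_ID and the
 * my_clock_* functions are hypothetical):
 *
 *	static struct k_clock my_clock = {
 *		.clock_getres	= my_clock_getres,
 *		.clock_get	= my_clock_get,
 *		.clock_set	= do_posix_clock_nosettime,
 *	};
 *	register_posix_clock(MY_CLOCK_ID, &my_clock);
 *
 * Members left NULL fall back to the common_* defaults via
 * CLOCK_DISPATCH() below.
 */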

static struct k_clock posix_clocks[MAX_CLOCKS];

/*
 * These are defined below.
 */
static int common_nsleep(const clockid_t, int flags, struct timespec *t,
			 struct timespec __user *rmtp);
static void common_timer_get(struct k_itimer *, struct itimerspec *);
static int common_timer_set(struct k_itimer *, int,
			    struct itimerspec *, struct itimerspec *);
static int common_timer_del(struct k_itimer *timer);

static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);

static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags);

static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}

/*
 * Call the k_clock hook function if non-null, or the default function.
 */
#define CLOCK_DISPATCH(clock, call, arglist) \
	((clock) < 0 ? posix_cpu_##call arglist : \
	 (posix_clocks[clock].call != NULL \
	  ? (*posix_clocks[clock].call) arglist : common_##call arglist))
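
/*
 * For example, CLOCK_DISPATCH(timr->it_clock, timer_get,
 * (timr, &cur_setting)) resolves to posix_cpu_timer_get() for CPU
 * clocks (negative clock ids), to the registered
 * posix_clocks[clock].timer_get hook if it is non-NULL, and to
 * common_timer_get() otherwise.
 */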

/*
 * Default clock hook functions when the struct k_clock passed
 * to register_posix_clock leaves a function pointer null.
 *
 * The function common_CALL is the default implementation for
 * the function pointer CALL in struct k_clock.
 */

static inline int common_clock_getres(const clockid_t which_clock,
				      struct timespec *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = posix_clocks[which_clock].res;
	return 0;
}

/*
 * Get real time for POSIX timers
 */
static int common_clock_get(clockid_t which_clock, struct timespec *tp)
{
	ktime_get_real_ts(tp);
	return 0;
}

static inline int common_clock_set(const clockid_t which_clock,
				   struct timespec *tp)
{
	return do_sys_settimeofday(tp, NULL);
}

static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
	return 0;
}

static int no_timer_create(struct k_itimer *new_timer)
{
	return -EOPNOTSUPP;
}

static int no_nsleep(const clockid_t which_clock, int flags,
		     struct timespec *tsave, struct timespec __user *rmtp)
{
	return -EOPNOTSUPP;
}

/*
 * Return nonzero if we know a priori this clockid_t value is bogus.
 */
static inline int invalid_clockid(const clockid_t which_clock)
{
	if (which_clock < 0)	/* CPU clock, posix_cpu_* will check it */
		return 0;
	if ((unsigned) which_clock >= MAX_CLOCKS)
		return 1;
	if (posix_clocks[which_clock].clock_getres != NULL)
		return 0;
	if (posix_clocks[which_clock].res != 0)
		return 0;
	return 1;
}

/*
 * Get monotonic time for POSIX timers
 */
static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
{
	ktime_get_ts(tp);
	return 0;
}

/*
 * Get raw monotonic time for POSIX timers
 */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
{
	getrawmonotonic(tp);
	return 0;
}

static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
{
	*tp = current_kernel_time();
	return 0;
}

static int posix_get_monotonic_coarse(clockid_t which_clock,
						struct timespec *tp)
{
	*tp = get_monotonic_coarse();
	return 0;
}

static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
{
	*tp = ktime_to_timespec(KTIME_LOW_RES);
	return 0;
}

/*
 * Initialize everything, well, just everything in POSIX clocks/timers ;)
 */
static __init int init_posix_timers(void)
{
	struct k_clock clock_realtime = {
		.clock_getres = hrtimer_get_res,
	};
	struct k_clock clock_monotonic = {
		.clock_getres = hrtimer_get_res,
		.clock_get = posix_ktime_get_ts,
		.clock_set = do_posix_clock_nosettime,
	};
	struct k_clock clock_monotonic_raw = {
		.clock_getres = hrtimer_get_res,
		.clock_get = posix_get_monotonic_raw,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = no_timer_create,
		.nsleep = no_nsleep,
	};
	struct k_clock clock_realtime_coarse = {
		.clock_getres = posix_get_coarse_res,
		.clock_get = posix_get_realtime_coarse,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = no_timer_create,
		.nsleep = no_nsleep,
	};
	struct k_clock clock_monotonic_coarse = {
		.clock_getres = posix_get_coarse_res,
		.clock_get = posix_get_monotonic_coarse,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = no_timer_create,
		.nsleep = no_nsleep,
	};

	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
	register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
	register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
	register_posix_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
	register_posix_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);

	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,
					NULL);
	idr_init(&posix_timers_id);
	return 0;
}

__initcall(init_posix_timers);

static void schedule_next_timer(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	if (timr->it.real.interval.tv64 == 0)
		return;

	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
						timer->base->get_time(),
						timr->it.real.interval);

	timr->it_overrun_last = timr->it_overrun;
	timr->it_overrun = -1;
	++timr->it_requeue_pending;
	hrtimer_restart(timer);
}

/*
 * This function is exported for use by the signal delivery code.  It is
 * called just prior to the info block being released and passes that
 * block to us.  Its function is to update the overrun entry AND to
 * restart the timer.  It should only be called if the timer is to be
 * restarted (i.e. we have flagged this in the sys_private entry of the
 * info block).
 *
 * To protect against the timer going away while the interrupt is queued,
 * we require that the it_requeue_pending flag be set.
 */
void do_schedule_next_timer(struct siginfo *info)
{
	struct k_itimer *timr;
	unsigned long flags;

	timr = lock_timer(info->si_tid, &flags);

	if (timr && timr->it_requeue_pending == info->si_sys_private) {
		if (timr->it_clock < 0)
			posix_cpu_timer_schedule(timr);
		else
			schedule_next_timer(timr);

		info->si_overrun += timr->it_overrun_last;
	}

	if (timr)
		unlock_timer(timr, flags);
}

int posix_timer_event(struct k_itimer *timr, int si_private)
{
	struct task_struct *task;
	int shared, ret = -1;

	timr->sigq->info.si_sys_private = si_private;

	rcu_read_lock();
	task = pid_task(timr->it_pid, PIDTYPE_PID);
	if (task) {
		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
		ret = send_sigqueue(timr->sigq, task, shared);
	}
	rcu_read_unlock();
	/* If we failed to send the signal the timer stops. */
	return ret > 0;
}
EXPORT_SYMBOL_GPL(posix_timer_event);

/*
 * This function gets called when a POSIX.1b interval timer expires.  It
 * is used as the callback from the kernel internal (hrtimer) timer code.
 *
 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	if (timr->it.real.interval.tv64 != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		/*
		 * The signal was not sent because the signal is being
		 * ignored.  We will not get a callback to restart the
		 * timer, AND it should be restarted, so do that here.
		 */
		if (timr->it.real.interval.tv64 != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

#ifdef CONFIG_HIGH_RES_TIMERS
			{
				ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);

				if (timr->it.real.interval.tv64 < kj.tv64)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += (unsigned int)
				hrtimer_forward(timer, now,
						timr->it.real.interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}

static struct pid *good_sigevent(sigevent_t *event)
{
	struct task_struct *rtn = current->group_leader;

	if ((event->sigev_notify & SIGEV_THREAD_ID) &&
		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
		 !same_thread_group(rtn, current) ||
		 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
		return NULL;

	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
		return NULL;

	return task_pid(rtn);
}
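
/*
 * For reference, a userspace caller targeting one specific thread of
 * its own process would pass something like this (illustrative sketch;
 * "tid" is a hypothetical thread id in the caller's thread group):
 *
 *	struct sigevent sev;
 *	sev.sigev_notify = SIGEV_SIGNAL | SIGEV_THREAD_ID;
 *	sev.sigev_signo = SIGRTMIN;
 *	sev.sigev_notify_thread_id = tid;
 *
 * good_sigevent() above rejects a tid outside the caller's thread
 * group, and rejects signal numbers outside 1..SIGRTMAX.
 */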

void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
{
	if ((unsigned) clock_id >= MAX_CLOCKS) {
		printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
		       clock_id);
		return;
	}

	posix_clocks[clock_id] = *new_clock;
}
EXPORT_SYMBOL_GPL(register_posix_clock);

static struct k_itimer *alloc_posix_timer(void)
{
	struct k_itimer *tmr;

	tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
	if (!tmr)
		return tmr;
	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
		kmem_cache_free(posix_timers_cache, tmr);
		return NULL;
	}
	memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
	return tmr;
}

#define IT_ID_SET	1
#define IT_ID_NOT_SET	0
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
	if (it_id_set) {
		unsigned long flags;

		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&posix_timers_id, tmr->it_id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
	kmem_cache_free(posix_timers_cache, tmr);
}

/* Create a POSIX.1b interval timer. */

SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	struct k_itimer *new_timer;
	int error, new_timer_id;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (invalid_clockid(which_clock))
		return -EINVAL;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
 retry:
	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
		error = -EAGAIN;
		goto out;
	}
	spin_lock_irq(&idr_lock);
	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
	spin_unlock_irq(&idr_lock);
	if (error) {
		if (error == -EAGAIN)
			goto retry;
		/*
		 * Weird looking, but we return EAGAIN if the IDR is
		 * full (proper POSIX return value for this).
		 */
		error = -EAGAIN;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;

	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(&event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
	} else {
		event.sigev_notify = SIGEV_SIGNAL;
		event.sigev_signo = SIGALRM;
		event.sigev_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->it_sigev_notify     = event.sigev_notify;
	new_timer->sigq->info.si_signo = event.sigev_signo;
	new_timer->sigq->info.si_value = event.sigev_value;
	new_timer->sigq->info.si_tid   = new_timer->it_id;
	new_timer->sigq->info.si_code  = SI_TIMER;

	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}

	error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Once the siglock is dropped above, the timer is visible to
	 * other tasks in the thread group and may cease to exist at any
	 * time.  Don't use or modify new_timer after that unlock.
	 */
	return 0;

out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}
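
/*
 * Typical userspace usage of the syscalls implemented in this file
 * (illustrative sketch, error handling omitted):
 *
 *	timer_t tid;
 *	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *				.sigev_signo  = SIGALRM };
 *	struct itimerspec its = { .it_value    = { 1, 0 },
 *				  .it_interval = { 1, 0 } };
 *
 *	timer_create(CLOCK_MONOTONIC, &sev, &tid);
 *	timer_settime(tid, 0, &its, NULL);	(fires every second)
 *	...
 *	timer_delete(tid);
 */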

/*
 * Locking issues: We need to protect the result of the id look up until
 * we get the timer locked down so it is not deleted under us.  The
 * removal is done under the idr spinlock so we use that here to bridge
 * the find to the timer lock.  To avoid a deadlock, the timer id MUST
 * be released without holding the timer lock.
 */
static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;
	/*
	 * Watch out here.  We do an irqsave on the idr_lock and pass the
	 * flags part over to the timer lock.  Must not let interrupts in
	 * while we are moving the lock.
	 */
	spin_lock_irqsave(&idr_lock, *flags);
	timr = idr_find(&posix_timers_id, (int)timer_id);
	if (timr) {
		spin_lock(&timr->it_lock);
		if (timr->it_signal == current->signal) {
			spin_unlock(&idr_lock);
			return timr;
		}
		spin_unlock(&timr->it_lock);
	}
	spin_unlock_irqrestore(&idr_lock, *flags);

	return NULL;
}
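
/*
 * Callers pair lock_timer() with unlock_timer(), typically like this
 * (see sys_timer_gettime() below):
 *
 *	timr = lock_timer(timer_id, &flags);
 *	if (!timr)
 *		return -EINVAL;
 *	... operate on the timer ...
 *	unlock_timer(timr, flags);
 */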

/*
 * Get the time remaining on a POSIX.1b interval timer.  This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here.  First there is the case
 * of a timer that has a requeue pending.  These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may have expired, and if so, we handle that here.
 * Otherwise it is the same as a requeue pending timer with respect to
 * what we should report.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
	ktime_t now, remaining, iv;
	struct hrtimer *timer = &timr->it.real.timer;

	memset(cur_setting, 0, sizeof(struct itimerspec));

	iv = timr->it.real.interval;

	/* interval timer ? */
	if (iv.tv64)
		cur_setting->it_interval = ktime_to_timespec(iv);
	else if (!hrtimer_active(timer) &&
		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
		return;

	now = timer->base->get_time();

	/*
	 * When a requeue is pending or this is a SIGEV_NONE
	 * timer move the expiry time forward by intervals, so
	 * expiry is > now.
	 */
	if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);

	remaining = ktime_sub(hrtimer_get_expires(timer), now);
	/* Return 0 only when the timer is expired and not pending */
	if (remaining.tv64 <= 0) {
		/*
		 * A single shot SIGEV_NONE timer must return 0, when
		 * it is expired!
		 */
		if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
			cur_setting->it_value.tv_nsec = 1;
	} else
		cur_setting->it_value = ktime_to_timespec(remaining);
}

/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct itimerspec __user *, setting)
{
	struct k_itimer *timr;
	struct itimerspec cur_setting;
	unsigned long flags;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	CLOCK_DISPATCH(timr->it_clock, timer_get, (timr, &cur_setting));

	unlock_timer(timr, flags);

	if (copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
		return -EFAULT;

	return 0;
}

/*
 * Get the number of overruns of a POSIX.1b interval timer.  This is to
 * be the overrun of the timer last delivered.  At the same time we are
 * accumulating overruns on the next timer.  The overrun is frozen when
 * the signal is delivered, either at the notify time (if the info block
 * is not queued) or at the actual delivery time (as we are informed by
 * the callback to do_schedule_next_timer()).  So all we need to do is
 * to pick up the frozen overrun.
 */
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
	struct k_itimer *timr;
	int overrun;
	unsigned long flags;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	overrun = timr->it_overrun_last;
	unlock_timer(timr, flags);

	return overrun;
}

/* Set a POSIX.1b interval timer.  timr->it_lock is taken. */
static int
common_timer_set(struct k_itimer *timr, int flags,
		 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* disable the timer */
	timr->it.real.interval.tv64 = 0;
	/*
	 * Careful here.  On SMP systems we could be in the "fire" routine,
	 * which will be spinning as we hold the lock.  But this is ONLY an
	 * SMP issue.
	 */
	if (hrtimer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;

	timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;

	/* switch off the timer when it_value is zero */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));

	/* Convert interval */
	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);

	/* SIGEV_NONE timers are not queued!  See common_timer_get. */
	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
		/* Setup correct expiry time for relative timers */
		if (mode == HRTIMER_MODE_REL) {
			hrtimer_add_expires(timer, timer->base->get_time());
		}
		return 0;
	}

	hrtimer_start_expires(timer, mode);
	return 0;
}

/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct itimerspec __user *, new_setting,
		struct itimerspec __user *, old_setting)
{
	struct k_itimer *timr;
	struct itimerspec new_spec, old_spec;
	int error = 0;
	unsigned long flag;
	struct itimerspec *rtn = old_setting ? &old_spec : NULL;

	if (!new_setting)
		return -EINVAL;

	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
		return -EFAULT;

	if (!timespec_valid(&new_spec.it_interval) ||
	    !timespec_valid(&new_spec.it_value))
		return -EINVAL;
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	error = CLOCK_DISPATCH(timr->it_clock, timer_set,
			       (timr, flags, &new_spec, rtn));

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		rtn = NULL;	/* We already got the old time... */
		goto retry;
	}

	if (old_setting && !error &&
	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
		error = -EFAULT;

	return error;
}

static inline int common_timer_del(struct k_itimer *timer)
{
	timer->it.real.interval.tv64 = 0;

	if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
		return TIMER_RETRY;
	return 0;
}

static inline int timer_delete_hook(struct k_itimer *timer)
{
	return CLOCK_DISPATCH(timer->it_clock, timer_del, (timer));
}

/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
	struct k_itimer *timer;
	unsigned long flags;

retry_delete:
	timer = lock_timer(timer_id, &flags);
	if (!timer)
		return -EINVAL;

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}

	spin_lock(&current->sighand->siglock);
	list_del(&timer->list);
	spin_unlock(&current->sighand->siglock);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
	return 0;
}

/*
 * Delete a timer owned by the process; used by exit_itimers().
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}
	list_del(&timer->list);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}

/*
 * This is called by do_exit or de_thread, only when there are no more
 * references to the shared signal_struct.
 */
void exit_itimers(struct signal_struct *sig)
{
	struct k_itimer *tmr;

	while (!list_empty(&sig->posix_timers)) {
		tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
		itimer_delete(tmr);
	}
}

/* Not available / possible... functions */
int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);

int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
			       struct timespec *t, struct timespec __user *r)
{
#ifndef ENOTSUP
	return -EOPNOTSUPP;	/* aka ENOTSUP in userland for POSIX */
#else	/* parisc does define it separately. */
	return -ENOTSUP;
#endif
}
EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);

SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct timespec __user *, tp)
{
	struct timespec new_tp;

	if (invalid_clockid(which_clock))
		return -EINVAL;
	if (copy_from_user(&new_tp, tp, sizeof (*tp)))
		return -EFAULT;

	return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
}

SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct timespec __user *, tp)
{
	struct timespec kernel_tp;
	int error;

	if (invalid_clockid(which_clock))
		return -EINVAL;
	error = CLOCK_DISPATCH(which_clock, clock_get,
			       (which_clock, &kernel_tp));
	if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
		error = -EFAULT;

	return error;
}

SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct timespec __user *, tp)
{
	struct timespec rtn_tp;
	int error;

	if (invalid_clockid(which_clock))
		return -EINVAL;

	error = CLOCK_DISPATCH(which_clock, clock_getres,
			       (which_clock, &rtn_tp));

	if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
		error = -EFAULT;

	return error;
}

/*
 * nanosleep for monotonic and realtime clocks
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 struct timespec *tsave, struct timespec __user *rmtp)
{
	return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}

SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec t;

	if (invalid_clockid(which_clock))
		return -EINVAL;

	if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
		return -EFAULT;

	if (!timespec_valid(&t))
		return -EINVAL;

	return CLOCK_DISPATCH(which_clock, nsleep,
			      (which_clock, flags, &t, rmtp));
}

/*
 * nanosleep_restart for monotonic and realtime clocks
 */
static int common_nsleep_restart(struct restart_block *restart_block)
{
	return hrtimer_nanosleep_restart(restart_block);
}

/*
 * This will restart clock_nanosleep.  This is required only by
 * compat_clock_nanosleep_restart for now.
 */
long
clock_nanosleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->arg0;

	return CLOCK_DISPATCH(which_clock, nsleep_restart,
			      (restart_block));
}