/*
 *  fs/eventpoll.c (Efficient event polling implementation)
 *  Copyright (C) 2001,...,2007	 Davide Libenzi
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <asm/atomic.h>

/*
 * LOCKING:
 * There are three levels of locking required by epoll:
 *
 * 1) epmutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (spinlock)
 *
 * The acquisition order is the one listed above, from 1 to 3.
 * We need a spinlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * mutex (ep->mtx). It is acquired during the event transfer loop,
 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 * Then we also need a global mutex to serialize eventpoll_release_file()
 * and ep_free().
 * This mutex is acquired by ep_free() during the epoll file
 * cleanup path and it is also acquired by eventpoll_release_file()
 * if a file has been pushed inside an epoll set and is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It would be possible to drop "ep->mtx" and use the global
 * mutex "epmutex" (together with "ep->lock") instead, but having
 * "ep->mtx" makes the interface more scalable.
 * Events that require holding "epmutex" are very rare, while for
 * normal operations the epoll private "ep->mtx" guarantees
 * better scalability.
 */
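
/*
 * To illustrate the ordering, this is the full nesting that, for example,
 * eventpoll_release_file() ends up performing when it calls ep_remove():
 *
 *	mutex_lock(&epmutex);				[1] global mutex
 *	mutex_lock(&ep->mtx);				[2] per-epoll mutex
 *	spin_lock_irqsave(&ep->lock, flags);		[3] per-epoll spinlock
 *	...
 *	spin_unlock_irqrestore(&ep->lock, flags);
 *	mutex_unlock(&ep->mtx);
 *	mutex_unlock(&epmutex);
 *
 * A lock listed earlier is never acquired while one listed later is held.
 */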

#define DEBUG_EPOLL 0

#if DEBUG_EPOLL > 0
#define DPRINTK(x) printk x
#define DNPRINTK(n, x) do { if ((n) <= DEBUG_EPOLL) printk x; } while (0)
#else /* #if DEBUG_EPOLL > 0 */
#define DPRINTK(x) (void) 0
#define DNPRINTK(n, x) (void) 0
#endif /* #if DEBUG_EPOLL > 0 */

#define DEBUG_EPI 0

#if DEBUG_EPI != 0
#define EPI_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else /* #if DEBUG_EPI != 0 */
#define EPI_SLAB_DEBUG 0
#endif /* #if DEBUG_EPI != 0 */

/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET)

/* Maximum number of poll wake up nests we are allowing */
#define EP_MAX_POLLWAKE_NESTS 4

/* Maximum msec timeout value storable in a long int */
#define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ)

/* Maximum number of events that can be requested in one epoll_wait() call */
#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

/* Special value marking an epitem (or "ep->ovflist" itself) as not linked */
#define EP_UNACTIVE_PTR ((void *) -1L)

struct epoll_filefd {
	struct file *file;
	int fd;
};

/*
 * Node that is linked into the "wake_task_list" member of the "struct poll_safewake".
 * It is used to keep track of all tasks that are currently inside the wake_up() code
 * in order to 1) short-circuit wakeups coming from the same task and the same wait
 * queue head (a loop), 2) allow a maximum nesting depth for epoll descriptor
 * inclusion, and 3) let wakeups coming from other tasks go through.
 */
struct wake_task_node {
	struct list_head llink;
	struct task_struct *task;
	wait_queue_head_t *wq;
};

/*
 * This is used to implement the safe poll wake up, avoiding reentry
 * into the poll callback from inside wake_up().
 */
struct poll_safewake {
	struct list_head wake_task_list;
	spinlock_t lock;
};

/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 */
struct epitem {
	/* RB tree node used to link this structure to the eventpoll RB tree */
	struct rb_node rbn;

	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;

	/*
	 * Works together with "struct eventpoll"->ovflist in keeping the
	 * singly linked chain of items.
	 */
	struct epitem *next;

	/* The file descriptor information this item refers to */
	struct epoll_filefd ffd;

	/* Number of active wait queues attached to poll operations */
	int nwait;

	/* List containing poll wait queues */
	struct list_head pwqlist;

	/* The "container" of this item */
	struct eventpoll *ep;

	/* List header used to link this item to the "struct file" items list */
	struct list_head fllink;

	/* The structure that describes the events of interest and the source fd */
	struct epoll_event event;
};

/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/* Protects access to this structure */
	spinlock_t lock;

	/*
	 * This mutex is used to ensure that files are not removed
	 * while epoll is using them. This is held during the event
	 * collection loop, the file cleanup path, the epoll file exit
	 * code and the ctl operations.
	 */
	struct mutex mtx;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* RB tree root used to store monitored fd structs */
	struct rb_root rbr;

	/*
	 * This is a singly linked list that chains all the "struct epitem"
	 * that had their events triggered while we were transferring ready
	 * events to userspace w/out holding ->lock.
	 */
	struct epitem *ovflist;
};

/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct list_head llink;

	/* The "base" pointer is set to the container "struct epitem" */
	void *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_t wait;

	/* The wait queue head to which the "wait" wait queue item is linked */
	wait_queue_head_t *whead;
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

/*
 * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
static struct mutex epmutex;

/* Safe wake up implementation */
static struct poll_safewake psw;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;


/* Set up the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}

/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1 :
	        (p1->file < p2->file ? -1 : p1->fd - p2->fd));
}

/* Special initialization for the RB tree node to detect linkage */
static inline void ep_rb_initnode(struct rb_node *n)
{
	rb_set_parent(n, n);
}

/* Removes a node from the RB tree and marks it for a fast is-linked check */
static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r)
{
	rb_erase(n, r);
	rb_set_parent(n, n);
}

/* Fast check to verify that the item is linked to the main RB tree */
static inline int ep_rb_linked(struct rb_node *n)
{
	return rb_parent(n) != n;
}

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct list_head *p)
{
	return !list_empty(p);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}

/* Get the "struct epitem" from an epoll queue wrapper */
static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
	return container_of(p, struct ep_pqueue, pt)->epi;
}

/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
	return op != EPOLL_CTL_DEL;
}

/* Initialize the poll safe wake up structure */
static void ep_poll_safewake_init(struct poll_safewake *psw)
{
	INIT_LIST_HEAD(&psw->wake_task_list);
	spin_lock_init(&psw->lock);
}

/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the new callback'd wake up system, it is possible that the
 * poll callback is reentered from inside the call to wake_up() done
 * on the poll wait queue head. The rule is that we cannot reenter the
 * wake up code from the same task more than EP_MAX_POLLWAKE_NESTS times,
 * and we cannot reenter the same wait queue head at all. This allows a
 * hierarchy of epoll file descriptors no deeper than EP_MAX_POLLWAKE_NESTS.
 * We need the irq version of the spin lock because this one gets called
 * by the poll callback, which in turn is called from inside a wake_up(),
 * that might be called from irq context.
 */
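
/*
 * For reference, the kind of userspace setup that produces such nested
 * wakeups (a sketch, with error handling omitted):
 *
 *	int ep_inner = epoll_create(1);
 *	int ep_outer = epoll_create(1);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	ev.data.fd = ep_inner;
 *	epoll_ctl(ep_outer, EPOLL_CTL_ADD, ep_inner, &ev);
 *
 * An event arriving on a file inside "ep_inner" wakes up "ep_inner"'s
 * poll wait queue, which fires ep_poll_callback() for "ep_outer", and so
 * on up the chain; chains deeper than EP_MAX_POLLWAKE_NESTS are cut here.
 */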
static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
{
	int wake_nests = 0;
	unsigned long flags;
	struct task_struct *this_task = current;
	struct list_head *lsthead = &psw->wake_task_list, *lnk;
	struct wake_task_node *tncur;
	struct wake_task_node tnode;

	spin_lock_irqsave(&psw->lock, flags);

	/* Try to see if the current task is already inside this wakeup call */
	list_for_each(lnk, lsthead) {
		tncur = list_entry(lnk, struct wake_task_node, llink);

		if (tncur->wq == wq ||
		    (tncur->task == this_task && ++wake_nests > EP_MAX_POLLWAKE_NESTS)) {
			/*
			 * Oops ... loop detected or maximum nest level reached.
			 * We abort this wake by breaking the cycle itself.
			 */
			spin_unlock_irqrestore(&psw->lock, flags);
			return;
		}
	}

	/* Add the current task to the list */
	tnode.task = this_task;
	tnode.wq = wq;
	list_add(&tnode.llink, lsthead);

	spin_unlock_irqrestore(&psw->lock, flags);

	/* Do really wake up now */
	wake_up(wq);

	/* Remove the current task from the list */
	spin_lock_irqsave(&psw->lock, flags);
	list_del(&tnode.llink);
	spin_unlock_irqrestore(&psw->lock, flags);
}

/*
 * This function unregisters poll callbacks from the associated file descriptor.
 * Since this must be called without holding "ep->lock", the atomic exchange
 * trick protects us from multiple unregistrations.
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	int nwait;
	struct list_head *lsthead = &epi->pwqlist;
	struct eppoll_entry *pwq;

	/* This is called without locks, so we need the atomic exchange */
	nwait = xchg(&epi->nwait, 0);

	if (nwait) {
		while (!list_empty(lsthead)) {
			pwq = list_first_entry(lsthead, struct eppoll_entry, llink);

			list_del_init(&pwq->llink);
			remove_wait_queue(pwq->whead, &pwq->wait);
			kmem_cache_free(pwq_cache, pwq);
		}
	}
}

/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
 */
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
	unsigned long flags;
	struct file *file = epi->ffd.file;

	/*
	 * Removes poll wait queue hooks. We _have_ to do this without holding
	 * the "ep->lock" otherwise a deadlock might occur. This is because of
	 * the lock acquisition sequence: here we would take "ep->lock" and then
	 * the wait queue head lock when unregistering the wait queue. The
	 * wakeup callback runs holding the wait queue head lock and calls our
	 * callback, which tries to get "ep->lock".
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_ep_lock);
	if (ep_is_linked(&epi->fllink))
		list_del_init(&epi->fllink);
	spin_unlock(&file->f_ep_lock);

	if (ep_rb_linked(&epi->rbn))
		ep_rb_erase(&epi->rbn, &ep->rbr);

	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	/* At this point it is safe to free the eventpoll item */
	kmem_cache_free(epi_cache, epi);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p)\n",
		     current, ep, file));

	return 0;
}

static void ep_free(struct eventpoll *ep)
{
	struct rb_node *rbp;
	struct epitem *epi;

	/* We need to release all tasks waiting on this file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(&psw, &ep->poll_wait);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
	 * We do not need to hold "ep->mtx" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release_file() but
	 * holding "epmutex" is sufficient here.
	 */
	mutex_lock(&epmutex);

	/*
	 * Walks through the whole tree, unregistering poll callbacks.
	 */
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
	}

	/*
	 * Walks through the whole tree, freeing each "struct epitem". At this
	 * point we are sure no poll callbacks will be lingering around, and also by
	 * holding "epmutex" we can be sure that no file cleanup code will hit
	 * us during this operation. So we can avoid the lock on "ep->lock".
	 */
	while ((rbp = rb_first(&ep->rbr)) != NULL) {
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove(ep, epi);
	}

	mutex_unlock(&epmutex);
	mutex_destroy(&ep->mtx);
	kfree(ep);
}

static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep)
		ep_free(ep);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
	return 0;
}

static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	unsigned int pollflags = 0;
	unsigned long flags;
	struct eventpoll *ep = file->private_data;

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/* Check our condition */
	spin_lock_irqsave(&ep->lock, flags);
	if (!list_empty(&ep->rdllist))
		pollflags = POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&ep->lock, flags);

	return pollflags;
}

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
	.release	= ep_eventpoll_release,
	.poll		= ep_eventpoll_poll
};

/* Fast test to see if the file is an eventpoll file */
static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}

/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need this facility to correctly clean up files that are
 * closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct list_head *lsthead = &file->f_ep_links;
	struct eventpoll *ep;
	struct epitem *epi;

	/*
	 * We don't want to get "file->f_ep_lock" because it is not
	 * necessary. It is not necessary because we're in the "struct file"
	 * cleanup path, and this means that no one is using this file anymore.
	 * So, for example, epoll_ctl() cannot hit here since if we reach this
	 * point, the file counter already went to zero and fget() would fail.
	 * The only hit might come from ep_free(), but holding the mutex
	 * will correctly serialize the operation. We do need to acquire
	 * "ep->mtx" after "epmutex" because ep_remove() requires it when called
	 * from anywhere but ep_free().
	 */
	mutex_lock(&epmutex);

	while (!list_empty(lsthead)) {
		epi = list_first_entry(lsthead, struct epitem, fllink);

		ep = epi->ep;
		list_del_init(&epi->fllink);
		mutex_lock(&ep->mtx);
		ep_remove(ep, epi);
		mutex_unlock(&ep->mtx);
	}

	mutex_unlock(&epmutex);
}

static int ep_alloc(struct eventpoll **pep)
{
	struct eventpoll *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

	if (!ep)
		return -ENOMEM;

	spin_lock_init(&ep->lock);
	mutex_init(&ep->mtx);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT;
	ep->ovflist = EP_UNACTIVE_PTR;

	*pep = ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n",
		     current, ep));
	return 0;
}

/*
 * Search the file inside the eventpoll tree. The RB tree operations
 * are protected by the "mtx" mutex, and ep_find() must be called with
 * "mtx" held.
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	int kcmp;
	struct rb_node *rbp;
	struct epitem *epi, *epir = NULL;
	struct epoll_filefd ffd;

	ep_set_ffd(&ffd, file, fd);
	for (rbp = ep->rbr.rb_node; rbp; ) {
		epi = rb_entry(rbp, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
		if (kcmp > 0)
			rbp = rbp->rb_right;
		else if (kcmp < 0)
			rbp = rbp->rb_left;
		else {
			epir = epi;
			break;
		}
	}

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n",
		     current, file, epir));

	return epir;
}

/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	unsigned long flags;
	struct epitem *epi = ep_item_from_wait(wait);
	struct eventpoll *ep = epi->ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
		     current, epi->ffd.file, epi, ep));

	spin_lock_irqsave(&ep->lock, flags);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
	 * until the next EPOLL_CTL_MOD will be issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto out_unlock;

	/*
	 * If we are transferring events to userspace, we can hold no locks
	 * (because we're accessing user memory, and because of linux f_op->poll()
	 * semantics). All the events that happen during that period of time are
	 * chained in ep->ovflist and requeued later on.
	 */
	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
		if (epi->next == EP_UNACTIVE_PTR) {
			epi->next = ep->ovflist;
			ep->ovflist = epi;
		}
		goto out_unlock;
	}

	/* If this file is already in the ready list we exit early */
	if (ep_is_linked(&epi->rdllink))
		goto is_linked;

	list_add_tail(&epi->rdllink, &ep->rdllist);

is_linked:
	/*
	 * Wake up (if active) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq))
		__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
				 TASK_INTERRUPTIBLE);
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

out_unlock:
	spin_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return 1;
}

/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct epitem *epi = ep_item_from_epqueue(pt);
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
	struct epitem *epic;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
		if (kcmp > 0)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color(&epi->rbn, &ep->rbr);
}

/*
 * Must be called with "mtx" held.
 */
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd)
{
	int error, revents, pwake = 0;
	unsigned long flags;
	struct epitem *epi;
	struct ep_pqueue epq;

	error = -ENOMEM;
	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
		goto error_return;

	/* Item initialization follows here ... */
	ep_rb_initnode(&epi->rbn);
	INIT_LIST_HEAD(&epi->rdllink);
	INIT_LIST_HEAD(&epi->fllink);
	INIT_LIST_HEAD(&epi->pwqlist);
	epi->ep = ep;
	ep_set_ffd(&epi->ffd, tfile, fd);
	epi->event = *event;
	epi->nwait = 0;
	epi->next = EP_UNACTIVE_PTR;

	/* Initialize the poll table using the queue callback */
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);

	/*
	 * Attach the item to the poll hooks and get current event bits.
	 * We can safely use the file* here because its usage count has
	 * been increased by the caller of this function. Note that after
	 * this operation completes, the poll callback can start hitting
	 * the new item.
	 */
	revents = tfile->f_op->poll(tfile, &epq.pt);

	/*
	 * We have to check if something went wrong during the poll wait queue
	 * install process. Namely, an allocation for a wait queue failed due
	 * to high memory pressure.
	 */
	if (epi->nwait < 0)
		goto error_unregister;

	/* Add the current item to the list of active epoll hooks for this file */
	spin_lock(&tfile->f_ep_lock);
	list_add_tail(&epi->fllink, &tfile->f_ep_links);
	spin_unlock(&tfile->f_ep_lock);

	/*
	 * Add the current item to the RB tree. All RB tree operations are
	 * protected by "mtx", and ep_insert() is called with "mtx" held.
	 */
	ep_rbtree_insert(ep, epi);

	/* We have to drop the new item inside our item list to keep track of it */
	spin_lock_irqsave(&ep->lock, flags);

	/* If the file is already "ready" we drop it inside the ready list */
	if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);

		/* Notify waiting tasks that events are available */
		if (waitqueue_active(&ep->wq))
			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	spin_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_insert(%p, %p, %d)\n",
		     current, ep, tfile, fd));

	return 0;

error_unregister:
	ep_unregister_pollwait(ep, epi);

	/*
	 * We need to do this because an event could have arrived on some
	 * allocated wait queue. Note that we don't care about the ep->ovflist
	 * list, since that is used/cleaned only inside a section bound by "mtx".
	 * And ep_insert() is called with "mtx" held.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	kmem_cache_free(epi_cache, epi);
error_return:
	return error;
}

/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status. Must be called with "mtx" held.
 */
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
	int pwake = 0;
	unsigned int revents;
	unsigned long flags;

	/*
	 * Set the new event interest mask before calling f_op->poll(), otherwise
	 * a potential race might occur. In fact, if we did this operation inside
	 * the lock, an event could happen between the f_op->poll() call and the
	 * registration of the new event mask.
	 */
	epi->event.events = event->events;

	/*
	 * Get current event bits. We can safely use the file* here because
	 * its usage count has been increased by the caller of this function.
	 */
	revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);

	spin_lock_irqsave(&ep->lock, flags);

	/* Copy the data member from inside the lock */
	epi->event.data = event->data;

	/*
	 * If the item is "hot" and it is not registered inside the ready
	 * list, push it inside.
	 */
	if (revents & event->events) {
		if (!ep_is_linked(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);

			/* Notify waiting tasks that events are available */
			if (waitqueue_active(&ep->wq))
				__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
						 TASK_INTERRUPTIBLE);
			if (waitqueue_active(&ep->poll_wait))
				pwake++;
		}
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return 0;
}

static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *events,
			  int maxevents)
{
	int eventcnt, error = -EFAULT, pwake = 0;
	unsigned int revents;
	unsigned long flags;
	struct epitem *epi, *nepi;
	struct list_head txlist;

	INIT_LIST_HEAD(&txlist);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
	 */
	mutex_lock(&ep->mtx);

	/*
	 * Steal the ready list, and re-init the original one to the
	 * empty list. Also, set ep->ovflist to NULL so that events
	 * happening while looping w/out locks, are not lost. We cannot
	 * have the poll callback queue directly on ep->rdllist,
	 * because we are doing it in the loop below, in a lockless way.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	list_splice(&ep->rdllist, &txlist);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->ovflist = NULL;
	spin_unlock_irqrestore(&ep->lock, flags);

	/*
	 * We can loop without lock because this is a task private list.
	 * We just spliced ep->rdllist out into "txlist" above.
	 * Items cannot vanish during the loop because we are holding "mtx".
	 */
	for (eventcnt = 0; !list_empty(&txlist) && eventcnt < maxevents;) {
		epi = list_first_entry(&txlist, struct epitem, rdllink);

		list_del_init(&epi->rdllink);

		/*
		 * Get the ready file event set. We can safely use the file
		 * because we are holding the "mtx" and this will guarantee
		 * that both the file and the item will not vanish.
		 */
		revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
		revents &= epi->event.events;

		/*
		 * If the event mask intersects the caller-requested one,
		 * deliver the event to userspace. Again, we are holding
		 * "mtx", so no operations coming from userspace can change
		 * the item.
		 */
		if (revents) {
			if (__put_user(revents,
				       &events[eventcnt].events) ||
			    __put_user(epi->event.data,
				       &events[eventcnt].data))
				goto errxit;
			if (epi->event.events & EPOLLONESHOT)
				epi->event.events &= EP_PRIVATE_BITS;
			eventcnt++;
		}
		/*
		 * At this point, no one can insert into ep->rdllist besides
		 * us. The epoll_ctl() callers are locked out by us holding
		 * "mtx" and the poll callback will queue them in ep->ovflist.
		 */
		if (!(epi->event.events & EPOLLET) &&
		    (revents & epi->event.events))
			list_add_tail(&epi->rdllink, &ep->rdllist);
	}
	error = 0;

errxit:

	spin_lock_irqsave(&ep->lock, flags);
	/*
	 * During the time we spent in the loop above, some other events
	 * might have been queued by the poll callback. We re-insert them
	 * here (unless they are already queued, or they have been disabled
	 * by EPOLLONESHOT).
	 */
	for (nepi = ep->ovflist; (epi = nepi) != NULL;
	     nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
		if (!ep_is_linked(&epi->rdllink) &&
		    (epi->event.events & ~EP_PRIVATE_BITS))
			list_add_tail(&epi->rdllink, &ep->rdllist);
	}
	/*
	 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
	 * releasing the lock, events will be queued in the normal way inside
	 * ep->rdllist.
	 */
	ep->ovflist = EP_UNACTIVE_PTR;

	/*
	 * In case of error in the event-send loop, or in case the number of
	 * ready events exceeds the userspace limit, we need to splice the
	 * "txlist" back inside ep->rdllist.
	 */
	list_splice(&txlist, &ep->rdllist);

	if (!list_empty(&ep->rdllist)) {
		/*
		 * Wake up (if active) both the eventpoll wait list and the ->poll()
		 * wait list (delayed after we release the lock).
		 */
		if (waitqueue_active(&ep->wq))
			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
					 TASK_INTERRUPTIBLE);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	mutex_unlock(&ep->mtx);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return eventcnt == 0 ? error : eventcnt;
}

static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout)
{
	int res, eavail;
	unsigned long flags;
	long jtimeout;
	wait_queue_t wait;

	/*
	 * Calculate the timeout by checking for the "infinite" value (-1)
	 * and the overflow condition. The passed timeout is in milliseconds;
	 * that is why the (t * HZ + 999) / 1000 round-up conversion.
	 */
	jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
		MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
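	/*
	 * For example, with HZ=100 a timeout of 505 ms becomes
	 * (505 * 100 + 999) / 1000 = 51 jiffies: the +999 rounds up, so
	 * we never sleep less than the caller asked for.
	 */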

retry:
	spin_lock_irqsave(&ep->lock, flags);

	res = 0;
	if (list_empty(&ep->rdllist)) {
		/*
		 * We don't have any available event to return to the caller.
		 * We need to sleep here, and we will be woken up by
		 * ep_poll_callback() when events become available.
		 */
		init_waitqueue_entry(&wait, current);
		wait.flags |= WQ_FLAG_EXCLUSIVE;
		__add_wait_queue(&ep->wq, &wait);

		for (;;) {
			/*
			 * We don't want to sleep if the ep_poll_callback() sends us
			 * a wakeup in between. That's why we set the task state
			 * to TASK_INTERRUPTIBLE before doing the checks.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
			if (!list_empty(&ep->rdllist) || !jtimeout)
				break;
			if (signal_pending(current)) {
				res = -EINTR;
				break;
			}

			spin_unlock_irqrestore(&ep->lock, flags);
			jtimeout = schedule_timeout(jtimeout);
			spin_lock_irqsave(&ep->lock, flags);
		}
		__remove_wait_queue(&ep->wq, &wait);

		set_current_state(TASK_RUNNING);
	}

	/* Is it worth trying to dig for events? */
	eavail = !list_empty(&ep->rdllist);

	spin_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Try to transfer events to user space. In case we get 0 events and
	 * there's still timeout left over, we go trying again in search of
	 * more luck.
	 */
	if (!res && eavail &&
	    !(res = ep_send_events(ep, events, maxevents)) && jtimeout)
		goto retry;

	return res;
}

/*
 * It opens an eventpoll file descriptor. The "size" parameter is there
 * for historical reasons, from when epoll was using a hash instead of an
 * RB tree. With the current implementation, the "size" parameter is ignored
 * (besides sanity checks).
 */
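
/*
 * Typical userspace usage (a sketch; the hint value passed as "size" is
 * ignored here beyond the > 0 check):
 *
 *	int epfd = epoll_create(10);
 *	if (epfd < 0)
 *		perror("epoll_create");
 */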
asmlinkage long sys_epoll_create(int size)
{
	int error, fd = -1;
	struct eventpoll *ep;
	struct inode *inode;
	struct file *file;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
		     current, size));

	/*
	 * Sanity check on the size parameter, and create the internal data
	 * structure ("struct eventpoll").
	 */
	error = -EINVAL;
	if (size <= 0 || (error = ep_alloc(&ep)) != 0)
		goto error_return;

	/*
	 * Creates all the items needed to set up an eventpoll file. That is,
	 * a file structure, an inode and a free file descriptor.
	 */
	error = anon_inode_getfd(&fd, &inode, &file, "[eventpoll]",
				 &eventpoll_fops, ep);
	if (error)
		goto error_free;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
		     current, size, fd));

	return fd;

error_free:
	ep_free(ep);
error_return:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
		     current, size, error));
	return error;
}

/*
 * The following function implements the controller interface for
 * the eventpoll file that enables the insertion/removal/change of
 * file descriptors inside the interest set.
 */
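
/*
 * Userspace view of the three operations (a sketch, assuming "epfd" is an
 * epoll descriptor and "sock" a connected socket):
 *
 *	struct epoll_event ev = { .events = EPOLLIN | EPOLLET };
 *	ev.data.fd = sock;
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev);	[insert]
 *	ev.events = EPOLLIN | EPOLLOUT;
 *	epoll_ctl(epfd, EPOLL_CTL_MOD, sock, &ev);	[change mask]
 *	epoll_ctl(epfd, EPOLL_CTL_DEL, sock, NULL);	[remove]
 *
 * Note that POLLERR and POLLHUP are reported even if not requested in
 * "ev.events", since the ADD/MOD cases below force them into the mask.
 */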
asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
			      struct epoll_event __user *event)
{
	int error;
	struct file *file, *tfile;
	struct eventpoll *ep;
	struct epitem *epi;
	struct epoll_event epds;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n",
		     current, epfd, op, fd, event));

	error = -EFAULT;
	if (ep_op_has_event(op) &&
	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
		goto error_return;

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto error_return;

	/* Get the "struct file *" for the target file */
	tfile = fget(fd);
	if (!tfile)
		goto error_fput;

	/* The target file descriptor must support poll */
	error = -EPERM;
	if (!tfile->f_op || !tfile->f_op->poll)
		goto error_tgt_fput;

	/*
	 * We have to check that the file structure underneath the file descriptor
	 * the user passed to us _is_ an eventpoll file. And also we do not permit
	 * adding an epoll file descriptor inside itself.
	 */
	error = -EINVAL;
	if (file == tfile || !is_file_epoll(file))
		goto error_tgt_fput;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	mutex_lock(&ep->mtx);

	/*
	 * Try to lookup the file inside our RB tree. Since we grabbed "mtx"
	 * above, we can be sure to be able to use the item looked up by
	 * ep_find() till we release the mutex.
	 */
	epi = ep_find(ep, tfile, fd);

	error = -EINVAL;
	switch (op) {
	case EPOLL_CTL_ADD:
		if (!epi) {
			epds.events |= POLLERR | POLLHUP;

			error = ep_insert(ep, &epds, tfile, fd);
		} else
			error = -EEXIST;
		break;
	case EPOLL_CTL_DEL:
		if (epi)
			error = ep_remove(ep, epi);
		else
			error = -ENOENT;
		break;
	case EPOLL_CTL_MOD:
		if (epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_modify(ep, epi, &epds);
		} else
			error = -ENOENT;
		break;
	}
	mutex_unlock(&ep->mtx);

error_tgt_fput:
	fput(tfile);
error_fput:
	fput(file);
error_return:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n",
		     current, epfd, op, fd, event, error));

	return error;
}

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_wait(2).
 */
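
/*
 * The matching userspace loop looks roughly like this (a sketch, error
 * handling omitted, "handle_fd" being a hypothetical per-fd dispatcher):
 *
 *	struct epoll_event events[64];
 *	int i, n;
 *
 *	for (;;) {
 *		n = epoll_wait(epfd, events, 64, -1);
 *		for (i = 0; i < n; i++)
 *			handle_fd(events[i].data.fd, events[i].events);
 *	}
 */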
asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
			       int maxevents, int timeout)
{
	int error;
	struct file *file;
	struct eventpoll *ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n",
		     current, epfd, events, maxevents, timeout));

	/* The maximum number of events must be greater than zero */
	if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
		return -EINVAL;

	/* Verify that the area passed by the user is writable */
	if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) {
		error = -EFAULT;
		goto error_return;
	}

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto error_return;

	/*
	 * We have to check that the file structure underneath the fd
	 * the user passed to us _is_ an eventpoll file.
	 */
	error = -EINVAL;
	if (!is_file_epoll(file))
		goto error_fput;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	/* Time to fish for events ... */
	error = ep_poll(ep, events, maxevents, timeout);

error_fput:
	fput(file);
error_return:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n",
		     current, epfd, events, maxevents, timeout, error));

	return error;
}

#ifdef TIF_RESTORE_SIGMASK

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_pwait(2).
 */
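
/*
 * A sketch of the userspace idiom this enables: atomically unblock SIGINT
 * only for the duration of the wait, closing the race that a separate
 * sigprocmask() + epoll_wait() sequence would leave open:
 *
 *	sigset_t mask;
 *	sigfillset(&mask);
 *	sigdelset(&mask, SIGINT);
 *	n = epoll_pwait(epfd, events, 64, -1, &mask);
 *
 * (a libc wrapper typically supplies the "sigsetsize" argument for us).
 */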
asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
		int maxevents, int timeout, const sigset_t __user *sigmask,
		size_t sigsetsize)
{
	int error;
	sigset_t ksigmask, sigsaved;

	/*
	 * If the caller wants a certain signal mask to be set during the wait,
	 * we apply it here.
	 */
	if (sigmask) {
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;
		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	error = sys_epoll_wait(epfd, events, maxevents, timeout);

	/*
	 * If we changed the signal mask, we need to restore the original one.
	 * In case we've got a signal while waiting, we do not restore the
	 * signal mask yet, and we allow do_signal() to deliver the signal on
	 * the way back to userspace, before the signal mask is restored.
	 */
	if (sigmask) {
		if (error == -EINTR) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_thread_flag(TIF_RESTORE_SIGMASK);
		} else
			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	}

	return error;
}

#endif /* #ifdef TIF_RESTORE_SIGMASK */

static int __init eventpoll_init(void)
{
	mutex_init(&epmutex);

	/* Initialize the structure used to perform safe poll wait head wake ups */
	ep_poll_safewake_init(&psw);

	/* Allocates slab cache used to allocate "struct epitem" items */
	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
			0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC,
			NULL, NULL);

	/* Allocates slab cache used to allocate "struct eppoll_entry" */
	pwq_cache = kmem_cache_create("eventpoll_pwq",
			sizeof(struct eppoll_entry), 0,
			EPI_SLAB_DEBUG|SLAB_PANIC, NULL, NULL);

	return 0;
}
fs_initcall(eventpoll_init);
