/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 * 			    Manfred Spraul	    (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/slab.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2
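
/*
 * A waiter in wq_sleep() moves through these states during a pipelined
 * hand-off: STATE_NONE before any peer has picked it, STATE_PENDING
 * while the peer is still writing to its ext_wait_queue entry, and
 * STATE_READY once the entry (including ->msg) may be read without
 * holding info->lock.  See pipelined_send()/pipelined_receive() below.
 */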

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct msg_msg **messages;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, int mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_mtime = inode->i_ctime = inode->i_atime =
				CURRENT_TIME;

		if (S_ISREG(mode)) {
			struct mqueue_inode_info *info;
			struct task_struct *p = current;
			unsigned long mq_bytes, mq_msg_tblsz;

			inode->i_fop = &mqueue_file_operations;
			inode->i_size = FILENT_SIZE;
			/* mqueue specific info */
			info = MQUEUE_I(inode);
			spin_lock_init(&info->lock);
			init_waitqueue_head(&info->wait_q);
			INIT_LIST_HEAD(&info->e_wait_q[0].list);
			INIT_LIST_HEAD(&info->e_wait_q[1].list);
			info->notify_owner = NULL;
			info->qsize = 0;
			info->user = NULL;	/* set when all is ok */
			memset(&info->attr, 0, sizeof(info->attr));
			info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
			info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
			if (attr) {
				info->attr.mq_maxmsg = attr->mq_maxmsg;
				info->attr.mq_msgsize = attr->mq_msgsize;
			}
			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
			if (!info->messages)
				goto out_inode;

			mq_bytes = (mq_msg_tblsz +
				(info->attr.mq_maxmsg * info->attr.mq_msgsize));

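			/*
			 * Illustrative numbers (not enforced here): with the
			 * defaults DFLT_MSGMAX == 10 and DFLT_MSGSIZEMAX ==
			 * 8192 and 8-byte pointers, mq_msg_tblsz above is 80
			 * bytes and mq_bytes is 80 + 10 * 8192 = 82000 bytes,
			 * all charged against the creator's RLIMIT_MSGQUEUE
			 * below.
			 */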
			spin_lock(&mq_lock);
			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
			    u->mq_bytes + mq_bytes >
			    task_rlimit(p, RLIMIT_MSGQUEUE)) {
				spin_unlock(&mq_lock);
				/* mqueue_evict_inode() releases info->messages */
				goto out_inode;
			}
			u->mq_bytes += mq_bytes;
			spin_unlock(&mq_lock);

			/* all is ok */
			info->user = get_uid(u);
		} else if (S_ISDIR(mode)) {
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * DIRENT_SIZE;
			inode->i_op = &mqueue_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
		}
	}
	return inode;
out_inode:
	iput(inode);
	return NULL;
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = data;
	int error;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO,
				NULL);
	if (!inode) {
		error = -ENOMEM;
		goto out;
	}

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);
		error = -ENOMEM;
		goto out;
	}
	error = 0;

out:
	return error;
}

static int mqueue_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data, struct vfsmount *mnt)
{
	if (!(flags & MS_KERNMOUNT))
		data = current->nsproxy->ipc_ns;
	return get_sb_ns(fs_type, flags, data, mqueue_fill_super, mnt);
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_destroy_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes;
	int i;
	struct ipc_namespace *ipc_ns;

	end_writeback(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	/* Total amount of bytes accounted for the mqueue */
	mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *)
	    + info->attr.mq_msgsize);
	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				int mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}
	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
			!capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (!inode) {
		error = -ENOMEM;
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine is called when the queue file is read.  To avoid
 * reimplementing some form of mq_receive() here, we expose only the
 * queue size and notification info: the only values that are
 * interesting from the user's point of view and that are not
 * accessible through the standard routines.
 */
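/*
 * A read of a queue file (the mqueue filesystem is typically mounted
 * on /dev/mqueue) thus yields a single line such as:
 *
 *	QSIZE:129 NOTIFY:2 SIGNO:0 NOTIFY_PID:8260
 *
 * (illustrative values; the fields are padded by the format string
 * below).
 */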
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	filp->f_path.dentry->d_inode->i_atime =
		filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp,
				     struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/*
 * Adds current to info->e_wait_q[sr] before the first element whose
 * static_prio is not larger, keeping the list sorted by static_prio in
 * descending order.
 */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts the current task to sleep.  The caller must hold the queue lock;
 * it is dropped before returning.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
		    HRTIMER_MODE_ABS, CLOCK_REALTIME);

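		/*
		 * A peer doing a pipelined hand-off sets STATE_PENDING
		 * before it has finished writing to this ext_wait_queue
		 * entry; spin until it flips the state to STATE_READY so
		 * that ->msg is stable before we look at it.
		 */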
		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns the waiting task that should be serviced first, or NULL if
 * none exists.  Since wq_add() keeps the list sorted by static_prio in
 * descending order, the tail entry is the waiter with the best
 * (numerically lowest) static priority.
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

/* Auxiliary functions to manipulate the sorted message array */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}
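
/*
 * The messages[] array is kept sorted by m_type (the message priority)
 * in ascending order: msg_insert() pays O(mq_curmsgs) to shift entries
 * up, and msg_get() pops the highest-priority message from the tail in
 * O(1).  Equal-priority messages are shifted above a newly inserted
 * one, so among equals the oldest is returned first (FIFO), as POSIX
 * requires.  E.g. inserting priorities 1, 5, 3 yields {1, 3, 5}, and
 * msg_get() returns the priority-5 message.
 */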

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN - 1] = code;
}
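
/*
 * The cookie skb travels over the netlink socket that user space
 * registered via mq_notify() with SIGEV_THREAD; its last byte tells the
 * listener whether the registration fired (NOTIFY_WOKENUP, see
 * __do_notify() below) or was torn down (NOTIFY_REMOVED, see
 * remove_notification()).
 */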

/*
 * This function exists only to keep sys_mq_timedsend() from growing
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * The notification is invoked when a process has registered, no
	 * process is waiting synchronously for a message, and the queue
	 * has just changed from empty to non-empty.  At this point we
	 * can be sure that no one is waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = current_uid();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* a notification always unregisters its owner */
		put_pid(info->notify_owner);
		info->notify_owner = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct timespec __user *u_abs_timeout,
			   ktime_t *expires, struct timespec *ts)
{
	if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
		return -EFAULT;
	if (!timespec_valid(ts))
		return -EINVAL;

	*expires = timespec_to_ktime(*ts);
	return 0;
}
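
/*
 * Note that mq timeouts are absolute CLOCK_REALTIME values: the ktime
 * computed above is passed to schedule_hrtimeout_range_clock() with
 * HRTIMER_MODE_ABS in wq_sleep(), so the expiration tracks wall-clock
 * changes.
 */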

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	info->notify_owner = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return 0;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX)
			return 0;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
				attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return 0;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return 0;
	if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize
	    + sizeof(struct msg_msg *))) <
	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
		return 0;
	return 1;
}
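
/*
 * Example of what the overflow checks above reject (on 64-bit): a
 * CAP_SYS_RESOURCE caller passing mq_maxmsg == 3 and mq_msgsize ==
 * LONG_MAX passes the limit checks, but 3 * LONG_MAX would wrap the
 * unsigned mq_bytes arithmetic in mqueue_get_inode(); since LONG_MAX >
 * ULONG_MAX / 3, the division test catches it.
 */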

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
			struct dentry *dentry, int oflag, mode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	struct file *result;
	int ret;

	if (attr) {
		if (!mq_attr_ok(ipc_ns, attr)) {
			ret = -EINVAL;
			goto out;
		}
		/* store for use during create */
		dentry->d_fsdata = attr;
	}

	mode &= ~current_umask();
	ret = mnt_want_write(ipc_ns->mq_mnt);
	if (ret)
		goto out;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out_drop_write;

	result = dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
	/*
	 * dentry_open() took a persistent mnt_want_write(),
	 * so we can now drop this one.
	 */
	mnt_drop_write(ipc_ns->mq_mnt);
	return result;

out_drop_write:
	mnt_drop_write(ipc_ns->mq_mnt);
out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
	return ERR_PTR(ret);
}

/* Opens existing queue */
static struct file *do_open(struct ipc_namespace *ipc_ns,
				struct dentry *dentry, int oflag)
{
	int ret;
	const struct cred *cred = current_cred();

	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		ret = -EINVAL;
		goto err;
	}

	if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
		ret = -EACCES;
		goto err;
	}

	return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);

err:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
	return ERR_PTR(ret);
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	mutex_lock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_putfd;
	}
	mntget(ipc_ns->mq_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			audit_inode(name, dentry);
			if (oflag & O_EXCL) {
				error = -EEXIST;
				goto out;
			}
			filp = do_open(ipc_ns, dentry, oflag);
		} else {
			filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
						dentry, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		if (!dentry->d_inode) {
			error = -ENOENT;
			goto out;
		}
		audit_inode(name, dentry);
		filp = do_open(ipc_ns, dentry, oflag);
	}

	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
out_putfd:
	put_unused_fd(fd);
	fd = error;
out_upsem:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}
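
/*
 * For reference, the user-space view of the syscall above (an
 * illustrative sketch using the librt wrappers, not kernel code):
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *	mqd_t q = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, &attr);
 *
 * "/myqueue" names an entry in the mqueue filesystem mounted above,
 * and the returned descriptor is an ordinary fd created with O_CLOEXEC
 * set.
 */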

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex,
			I_MUTEX_PARENT);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		atomic_inc(&inode->i_count);
	err = mnt_want_write(ipc_ns->mq_mnt);
	if (err)
		goto out_err;
	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
	mnt_drop_write(ipc_ns->mq_mnt);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting it into the queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
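	/*
	 * Pairs with the STATE_PENDING busy-wait in wq_sleep(): the
	 * barrier orders the ->msg store and the list_del() above before
	 * STATE_READY becomes visible, so the receiver may read ->msg
	 * without retaking info->lock.
	 */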
	smp_wmb();
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(),
 * take its message and put it on the queue (there is guaranteed to be
 * room for it). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	int ret;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);

	filp = fget(mqdes);
	if (unlikely(!filp)) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	spin_lock(&info->lock);

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
		}
		if (ret < 0)
			free_msg(msg_ptr);
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			msg_insert(msg_ptr, info);
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
		spin_unlock(&info->lock);
		ret = 0;
	}
out_fput:
	fput(filp);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);

	filp = fget(mqdes);
	if (unlikely(!filp)) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}
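
/*
 * Illustrative user-space pairing of the two syscalls above (again via
 * the librt wrappers; "q" is a descriptor from mq_open()):
 *
 *	char buf[8192];			// >= the queue's mq_msgsize
 *	unsigned int prio;
 *	mq_send(q, "hi", 3, 5);		// send at priority 5
 *	ssize_t n = mq_receive(q, buf, sizeof(buf), &prio);
 *
 * The receive buffer must be at least mq_msgsize bytes, or the call
 * fails with -EMSGSIZE (checked above); the return value is the length
 * of the received message.
 */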

/*
 * Note: if the caller asks us to deregister (by passing a NULL
 * pointer) but is not the current owner of the notification, the
 * request is silently ignored.  POSIX does not explicitly define this
 * case.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			if (!filp) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	filp = fget(mqdes);
	if (!filp) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	filp = fget(mqdes);
	if (!filp) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&filp->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;
		spin_unlock(&filp->f_lock);

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
						sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.get_sb = mqueue_get_sb,
	.kill_sb = kill_litter_super,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	mntput(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
	if (IS_ERR(init_ipc_ns.mq_mnt)) {
		error = PTR_ERR(init_ipc_ns.mq_mnt);
		goto out_filesystem;
	}

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

__initcall(init_mqueue_fs);