/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *			    Manfred Spraul	    (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
	struct ipc_namespace	*ipc_ns;
};

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};
/*
 * Locking:
 *
 * Accesses to a message queue are synchronized by acquiring info->lock.
 *
 * There are two notable exceptions:
 * - The actual wakeup of a sleeping task is performed using the wake_q
 *   framework. info->lock is already released when wake_up_q is called.
 * - The exit codepaths after sleeping check ext_wait_queue->state without
 *   any locks. If it is STATE_READY, then the syscall is completed without
 *   acquiring info->lock.
 *
 * MQ_BARRIER:
 * To achieve proper release/acquire memory barrier pairing, the state is set to
 * STATE_READY with smp_store_release(), and it is read with READ_ONCE followed
 * by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe() is used.
 *
 * This prevents the following races:
 *
 * 1) With the simple wake_q_add(), the task could already be gone before
 *    the reference count is increased.
 * Thread A
 *				Thread B
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				wake_q_add(A)
 *				if (cmpxchg()) // success
 *				   ->state = STATE_READY (reordered)
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * sysret to user space
 * sys_exit()
 *				get_task_struct() // UaF
 *
 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 * the smp_store_release() that does ->state = STATE_READY.
 *
 * 2) Without proper _release/_acquire barriers, the woken up task
 *    could read stale data
 *
 * Thread A
 *				Thread B
 * do_mq_timedreceive
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				state = STATE_READY;
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * msg_ptr = wait.msg;		// Access to stale data!
 *				receiver->msg = message; (reordered)
 *
 * Solution: use _release and _acquire barriers.
 *
 * 3) There is intentionally no barrier when setting current->state
 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 *    release memory barrier, and the wakeup is triggered when holding
 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 *    acquire memory barrier.
 */
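
/*
 * A minimal sketch of the pairing described above (illustrative only; the
 * real code lives in __pipelined_op() and wq_sleep() below):
 *
 *	Waker (holds info->lock)	Sleeper (lockless exit path)
 *	------------------------	----------------------------
 *	task = get_task_struct(t);
 *	smp_store_release(
 *		&wait->state,
 *		STATE_READY);
 *					if (READ_ONCE(wait->state) ==
 *					    STATE_READY) {
 *						smp_acquire__after_ctrl_dep();
 *						// wait->msg is now visible
 *					}
 *	wake_q_add_safe(wake_q, task);
 */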

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct rb_node *msg_tree_rightmost;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	u32 notify_self_exec_id;
	struct user_namespace *notify_user_ns;
	struct ucounts *ucounts;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	bool rightmost = true;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;

	if (rightmost)
		info->msg_tree_rightmost = &leaf->rb_node;

	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
				  struct mqueue_inode_info *info)
{
	struct rb_node *node = &leaf->rb_node;

	if (info->msg_tree_rightmost == node)
		info->msg_tree_rightmost = rb_prev(node);

	rb_erase(node, &info->msg_tree);
	if (info->node_cache)
		kfree(leaf);
	else
		info->node_cache = leaf;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	/*
	 * During insert, low priorities go to the left and high to the
	 * right.  On receive, we want the highest priorities first, so
	 * walk all the way to the right.
	 */
	parent = info->msg_tree_rightmost;
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		msg_tree_erase(leaf, info);
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			msg_tree_erase(leaf, info);
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}
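
/*
 * Illustrative userspace view of the ordering implemented above. This is a
 * sketch only, not part of this file; the queue name "/prio_demo" is made
 * up, and the buffer size assumes the default mq_msgsize of 8192:
 *
 *	mqd_t q = mq_open("/prio_demo", O_CREAT | O_RDWR, 0600, NULL);
 *	unsigned int prio;
 *	char buf[8192];
 *
 *	mq_send(q, "lo", 2, 1);				// priority 1
 *	mq_send(q, "hi", 2, 31);			// priority 31
 *	mq_receive(q, buf, sizeof(buf), &prio);		// "hi", prio == 31
 *	mq_receive(q, buf, sizeof(buf), &prio);		// "lo", prio == 1
 */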

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->ucounts = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->msg_tree_rightmost = NULL;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume the worst case,
		 * which means accounting min(mq_maxmsg, MQ_PRIO_MAX) structs
		 * of type posix_msg_tree_node.
		 */

		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
					info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		info->ucounts = get_ucounts(current_ucounts());
		if (info->ucounts) {
			long msgqueue;

			spin_lock(&mq_lock);
			msgqueue = inc_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
			if (msgqueue == LONG_MAX || msgqueue > rlimit(RLIMIT_MSGQUEUE)) {
				dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
				spin_unlock(&mq_lock);
				put_ucounts(info->ucounts);
				info->ucounts = NULL;
				/* mqueue_evict_inode() releases info->messages */
				ret = -EMFILE;
				goto out_inode;
			}
			spin_unlock(&mq_lock);
		}
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}
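
/*
 * Worked example of the accounting above (a sketch only; the struct sizes
 * depend on architecture and config, so the totals are indicative):
 *
 *	mq_maxmsg = 10, mq_msgsize = 8192, MQ_PRIO_MAX = 32768
 *
 *	mq_treesize = 10 * sizeof(struct msg_msg)
 *		    + min(10, 32768) * sizeof(struct posix_msg_tree_node)
 *	mq_bytes    = 10 * 8192 + mq_treesize
 *
 * i.e. the worst case charged against RLIMIT_MSGQUEUE assumes every message
 * sits in its own priority node.
 */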

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_ipc_ns(ctx->ipc_ns);
	kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx;

	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
	fc->fs_private = ctx;
	fc->ops = &mqueue_fs_context_ops;
	return 0;
}

static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
	struct mqueue_fs_context *ctx;
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ctx = fc->fs_private;
	put_ipc_ns(ctx->ipc_ns);
	ctx->ipc_ns = get_ipc_ns(ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	if (info->ucounts) {
		unsigned long mq_bytes, mq_treesize;

		/* Total amount of bytes accounted for the mqueue */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		put_ucounts(info->ucounts);
		info->ucounts = NULL;
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_create(struct user_namespace *mnt_userns, struct inode *dir,
			 struct dentry *dentry, umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine handles reads from a queue file.
 *
 * Rather than implementing some form of mq_receive() here, we only allow
 * reading the queue size and the notification info: the only values that
 * are interesting from the user's point of view and that aren't accessible
 * through the standard routines.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}
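
/*
 * Illustrative only: with the filesystem mounted in the usual place
 * (mount -t mqueue none /dev/mqueue), the read above is what backs e.g.
 *
 *	$ cat /dev/mqueue/myqueue
 *	QSIZE:129     NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 *
 * (queue name and values made up; the format is the snprintf() above).
 */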

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}
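
/*
 * Userspace sketch of the poll support above. This is Linux-specific: an
 * mqd_t is a file descriptor here, so it can be handed to poll()/epoll
 * (portable POSIX code cannot rely on this). "q", "buf" and "prio" are as
 * in the earlier sketches:
 *
 *	struct pollfd pfd = { .fd = q, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
 *		// at least one message was queued at poll time
 *		mq_receive(q, buf, sizeof(buf), &prio);
 */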

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts the current task to sleep. The caller must hold the queue lock;
 * on return the lock is no longer held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		/* memory barrier not required, we hold info->lock */
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (READ_ONCE(ewp->state) == STATE_READY) {
			/* see MQ_BARRIER for purpose/pairing */
			smp_acquire__after_ctrl_dep();
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);

		/* we hold info->lock, so no memory barrier required */
		if (READ_ONCE(ewp->state) == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This helper exists only to keep do_mq_timedsend() from growing too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is a registered process, no process is waiting
	 * synchronously for a message AND the state of the queue changed from
	 * empty to not empty. Here we are sure that no one is waiting
	 * synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL: {
			struct kernel_siginfo sig_i;
			struct task_struct *task;

			/* do_mq_notify() accepts sigev_signo == 0, why?? */
			if (!info->notify.sigev_signo)
				break;

			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			rcu_read_lock();
			/* map current pid/uid into info->owner's namespaces */
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
						current_uid());
			/*
			 * We can't use kill_pid_info(), this signal should
			 * bypass check_kill_permission(). It is from kernel
			 * but si_fromuser() can't know this.
			 * We do check the self_exec_id, to avoid sending
			 * signals to programs that don't expect them.
			 */
			task = pid_task(info->notify_owner, PIDTYPE_TGID);
			if (task && task->self_exec_id ==
						info->notify_self_exec_id) {
				do_send_sig_info(info->notify.sigev_signo,
						&sig_i, task, PIDTYPE_TGID);
			}
			rcu_read_unlock();
			break;
		}
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				  mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(&init_user_ns, d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}
SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}
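
/*
 * Userspace sketch of the syscall above (illustrative; the queue name
 * "/demo" and the attribute values are made up):
 *
 *	struct mq_attr attr = { .mq_maxmsg = 8, .mq_msgsize = 256 };
 *	mqd_t q = mq_open("/demo", O_CREAT | O_EXCL | O_RDWR, 0600, &attr);
 *
 *	if (q == (mqd_t)-1)
 *		perror("mq_open");	// e.g. EEXIST with O_CREAT|O_EXCL
 */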

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(&init_user_ns, d_inode(dentry->d_parent),
				 dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message tree. If there is a waiting receiver, then it
 * bypasses the message tree and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

static inline void __pipelined_op(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct ext_wait_queue *this)
{
	struct task_struct *task;

	list_del(&this->list);
	task = get_task_struct(this->task);

	/* see MQ_BARRIER for purpose/pairing */
	smp_store_release(&this->state, STATE_READY);
	wake_q_add_safe(wake_q, task);
}

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting the message into the queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	__pipelined_op(wake_q, info, receiver);
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (there is guaranteed to be
 * room for it now). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	__pipelined_op(wake_q, info, sender);
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec64 *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
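
/*
 * Userspace sketch of the two syscalls above (illustrative; "q", "buf" and
 * the 5 second budget are made up). Note that the timeout is an absolute
 * CLOCK_REALTIME value, matching the CLOCK_REALTIME hrtimer in wq_sleep():
 *
 *	struct timespec abs;
 *	unsigned int prio = 0;
 *
 *	clock_gettime(CLOCK_REALTIME, &abs);
 *	abs.tv_sec += 5;
 *	if (mq_timedsend(q, "ping", 4, prio, &abs) == -1 &&
 *	    errno == ETIMEDOUT)
 *		;	// queue stayed full for the whole 5 seconds
 *	ssize_t n = mq_timedreceive(q, buf, sizeof(buf), &prio, &abs);
 */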

/*
 * Note: if the user asks us to deregister (by passing a NULL pointer) but
 * is not the current owner of the notification, the request is silently
 * discarded. POSIX does not explicitly specify this case.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc)
				return -ENOMEM;

			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto free_skb;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				goto free_skb;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret)
				return ret;
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			info->notify_self_exec_id = current->self_exec_id;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else
free_skb:
		dev_kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}
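
/*
 * Userspace sketch of SIGEV_SIGNAL registration (illustrative; the signal
 * choice is arbitrary). Per __do_notify() above, the signal fires once,
 * when the queue goes from empty to non-empty, and the registration is
 * then dropped:
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *
 *	if (mq_notify(q, &sev) == -1)
 *		perror("mq_notify");	// EBUSY if someone else registered
 */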

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}
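
/*
 * Userspace sketch (illustrative): mq_getattr() and mq_setattr() both land
 * in do_mq_getsetattr() above; only O_NONBLOCK may be changed:
 *
 *	struct mq_attr cur;
 *
 *	mq_getattr(q, &cur);
 *	cur.mq_flags |= O_NONBLOCK;
 *	mq_setattr(q, &cur, NULL);	// switch to non-blocking operation
 */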

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags		     */
	compat_long_t mq_maxmsg;     /* maximum number of messages	     */
	compat_long_t mq_msgsize;    /* maximum message size		     */
	compat_long_t mq_curmsgs;    /* number of messages currently queued  */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
				   struct timespec64 *ts)
{
	if (get_old_timespec32(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
		const char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int, msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
		char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.free_inode = mqueue_free_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
	.free		= mqueue_fs_context_free,
	.get_tree	= mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
	.name			= "mqueue",
	.init_fs_context	= mqueue_init_fs_context,
	.kill_sb		= kill_litter_super,
	.fs_flags		= FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	struct vfsmount *m;

	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default  = DFLT_MSGSIZE;

	m = mq_create_mount(ns);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ns->mq_mnt = m;
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);