/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *			    Manfred Spraul	    (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
	struct ipc_namespace	*ipc_ns;
};

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

/*
 * Locking:
 *
 * Accesses to a message queue are synchronized by acquiring info->lock.
 *
 * There are two notable exceptions:
 * - The actual wakeup of a sleeping task is performed using the wake_q
 *   framework. info->lock is already released when wake_up_q is called.
 * - The exit codepaths after sleeping check ext_wait_queue->state without
 *   any locks. If it is STATE_READY, then the syscall is completed without
 *   acquiring info->lock.
 *
 * MQ_BARRIER:
 * To achieve proper release/acquire memory barrier pairing, the state is set to
 * STATE_READY with smp_store_release(), and it is read with READ_ONCE followed
 * by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe() is used.
 *
 * This prevents the following races:
 *
 * 1) With the simple wake_q_add(), the task could be gone already before
 *    the increase of the reference happens
 * Thread A
 *				Thread B
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				wake_q_add(A)
 *				if (cmpxchg()) // success
 *				   ->state = STATE_READY (reordered)
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * sysret to user space
 * sys_exit()
 *				get_task_struct() // UaF
 *
 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 * the smp_store_release() that does ->state = STATE_READY.
 *
 * 2) Without proper _release/_acquire barriers, the woken up task
 *    could read stale data
 *
 * Thread A
 *				Thread B
 * do_mq_timedreceive
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				state = STATE_READY;
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * msg_ptr = wait.msg;		// Access to stale data!
 *				receiver->msg = message; (reordered)
 *
 * Solution: use _release and _acquire barriers.
 *
 * 3) There is intentionally no barrier when setting current->state
 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 *    release memory barrier, and the wakeup is triggered when holding
 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 *    acquire memory barrier.
 */
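
/*
 * Illustrative sketch of the MQ_BARRIER pairing described above (an
 * annotation, not part of the original source): the waker publishes its
 * data before setting the state with a release store, and the waiter
 * re-checks the state with an acquire barrier, so any data read after the
 * check is guaranteed to be fresh.
 *
 *	// waker, with info->lock held (cf. __pipelined_op() below):
 *	receiver->msg = message;			// publish data
 *	task = get_task_struct(receiver->task);		// ref before release
 *	smp_store_release(&receiver->state, STATE_READY);
 *	wake_q_add_safe(wake_q, task);
 *
 *	// waiter, lockless exit path (cf. wq_sleep() below):
 *	if (READ_ONCE(wait.state) == STATE_READY) {
 *		smp_acquire__after_ctrl_dep();	// pairs with the release
 *		msg = wait.msg;			// cannot be stale now
 *	}
 */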

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct rb_node *msg_tree_rightmost;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	u32 notify_self_exec_id;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	bool rightmost = true;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;

	if (rightmost)
		info->msg_tree_rightmost = &leaf->rb_node;

	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}
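
/*
 * Illustrative example of the resulting ordering (an annotation, not part
 * of the original source): each distinct priority gets one tree node whose
 * msg_list stays FIFO, and msg_get() below always takes from the rightmost
 * (highest priority) node. So after a user-space sender does:
 *
 *	mq_send(q, "a", 1, 1);
 *	mq_send(q, "b", 1, 5);
 *	mq_send(q, "c", 1, 5);
 *	mq_send(q, "d", 1, 3);
 *
 * the messages are received in the order b, c, d, a: highest priority
 * first, FIFO among equal priorities.
 */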

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
				  struct mqueue_inode_info *info)
{
	struct rb_node *node = &leaf->rb_node;

	if (info->msg_tree_rightmost == node)
		info->msg_tree_rightmost = rb_prev(node);

	rb_erase(node, &info->msg_tree);
	if (info->node_cache)
		kfree(leaf);
	else
		info->node_cache = leaf;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	/*
	 * During insert, low priorities go to the left and high to the
	 * right.  On receive, we want the highest priorities first, so
	 * walk all the way to the right.
	 */
	parent = info->msg_tree_rightmost;
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		msg_tree_erase(leaf, info);
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			msg_tree_erase(leaf, info);
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->msg_tree_rightmost = NULL;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */
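		/*
		 * Worked example of the accounting below (an annotation; the
		 * struct sizes are assumptions, they vary by architecture and
		 * config): with mq_maxmsg = 10 and mq_msgsize = 8192, and
		 * assuming sizeof(struct msg_msg) == 48 and
		 * sizeof(struct posix_msg_tree_node) == 48:
		 *
		 *	mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 48 = 960
		 *	mq_bytes    = 10 * 8192 + mq_treesize             = 82880
		 *
		 * i.e. roughly 81 KiB is charged against the user's
		 * RLIMIT_MSGQUEUE for this queue.
		 */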

		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
					info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_ipc_ns(ctx->ipc_ns);
	kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx;

	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
	fc->fs_private = ctx;
	fc->ops = &mqueue_fs_context_ops;
	return 0;
}

static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
	struct mqueue_fs_context *ctx;
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ctx = fc->fs_private;
	put_ipc_ns(ctx->ipc_ns);
	ctx->ipc_ns = get_ipc_ns(ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	user = info->user;
	if (user) {
		unsigned long mq_bytes, mq_treesize;

		/* Total amount of bytes accounted for the mqueue */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_create(struct user_namespace *mnt_userns, struct inode *dir,
			 struct dentry *dentry, umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine handles reads from the queue file.  To avoid implementing
 * some form of mq_receive here, we only allow reading the queue size and
 * the notification info (the only values that are interesting from the
 * user's point of view and that aren't accessible through the standard
 * message queue routines).
 */
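/*
 * Example output (illustrative values): for a queue holding 80 bytes of
 * messages, with a SIGEV_SIGNAL/SIGUSR1 notification registered by pid
 * 1234, "cat /dev/mqueue/<name>" prints a single line like
 *
 *	QSIZE:80         NOTIFY:0     SIGNO:10    NOTIFY_PID:1234
 *
 * (SIGEV_SIGNAL is 0 on Linux, and SIGUSR1 is 10 on x86).
 */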
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}
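
/*
 * Illustrative user-space use of the poll support above (an annotation,
 * not part of the original source): on Linux an mqd_t is a plain file
 * descriptor, so it can be handed to poll():
 *
 *	#include <poll.h>
 *	#include <mqueue.h>
 *
 *	struct pollfd pfd = { .fd = (int)mqdes, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN)) {
 *		// at least one message is queued; mq_receive() won't block
 *	}
 */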

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts the current task to sleep. The caller must hold the queue lock; the
 * lock is no longer held when this function returns.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		/* memory barrier not required, we hold info->lock */
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (READ_ONCE(ewp->state) == STATE_READY) {
			/* see MQ_BARRIER for purpose/pairing */
			smp_acquire__after_ctrl_dep();
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);

		/* we hold info->lock, so no memory barrier required */
		if (READ_ONCE(ewp->state) == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to split up the overly long sys_mq_timedsend.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is a registered process, there is no process
	 * waiting synchronously for a message AND the state of the queue
	 * changed from empty to not empty. Here we are sure that no one is
	 * waiting synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL: {
			struct kernel_siginfo sig_i;
			struct task_struct *task;

			/* do_mq_notify() accepts sigev_signo == 0, why?? */
			if (!info->notify.sigev_signo)
				break;

			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			rcu_read_lock();
			/* map current pid/uid into info->owner's namespaces */
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
						current_uid());
			/*
			 * We can't use kill_pid_info(), this signal should
			 * bypass check_kill_permission(). It is from kernel
			 * but si_fromuser() can't know this.
			 * We do check the self_exec_id, to avoid sending
			 * signals to programs that don't expect them.
			 */
			task = pid_task(info->notify_owner, PIDTYPE_TGID);
			if (task && task->self_exec_id ==
						info->notify_self_exec_id) {
				do_send_sig_info(info->notify.sigev_signo,
						&sig_i, task, PIDTYPE_TGID);
			}
			rcu_read_unlock();
			break;
		}
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				  mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(&init_user_ns, d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(&init_user_ns, d_inode(dentry->d_parent),
				 dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */
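
/*
 * Illustrative timeline of a pipelined send, using the helpers defined
 * below (an annotation, not part of the original source):
 *
 *	// receiver, in do_mq_timedreceive() with the queue empty:
 *	wait.task = current;
 *	WRITE_ONCE(wait.state, STATE_NONE);
 *	ret = wq_sleep(info, RECV, timeout, &wait);	// drops info->lock
 *
 *	// sender, in do_mq_timedsend() with info->lock held:
 *	receiver = wq_get_first_waiter(info, RECV);
 *	if (receiver)		// bypass the message tree entirely
 *		pipelined_send(&wake_q, info, msg_ptr, receiver);
 *	spin_unlock(&info->lock);
 *	wake_up_q(&wake_q);	// receiver wakes and sees STATE_READY
 */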

static inline void __pipelined_op(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct ext_wait_queue *this)
{
	struct task_struct *task;

	list_del(&this->list);
	task = get_task_struct(this->task);

	/* see MQ_BARRIER for purpose/pairing */
	smp_store_release(&this->state, STATE_READY);
	wake_q_add_safe(wake_q, task);
}

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	__pipelined_op(wake_q, info, receiver);
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (we know there is at least
 * one free slot). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	__pipelined_op(wake_q, info, sender);
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec64 *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}

/*
 * Note: the case where a user asks us to deregister (by passing a NULL
 * pointer) while not currently being the owner of the notification is
 * silently discarded. POSIX does not explicitly define this behaviour.
 */
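/*
 * Illustrative user-space registration (an annotation; the context pointer
 * is a made-up example): ask for SIGUSR1 to be delivered once, the next
 * time the queue goes from empty to non-empty:
 *
 *	#include <mqueue.h>
 *	#include <signal.h>
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *		.sigev_value.sival_ptr = &some_context,	// hypothetical
 *	};
 *	if (mq_notify(mqdes, &sev) == -1)
 *		perror("mq_notify");	// e.g. EBUSY if already owned
 */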
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc)
				return -ENOMEM;

			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto free_skb;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				goto free_skb;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret)
				return ret;
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			info->notify_self_exec_id = current->self_exec_id;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else
free_skb:
		dev_kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}
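
/*
 * Illustrative user-space use (an annotation, not part of the original
 * source): the only attribute that can be changed on an open queue is the
 * O_NONBLOCK bit in mq_flags, which is exactly what do_mq_getsetattr()
 * enforces above. The glibc wrappers map onto this syscall:
 *
 *	#include <mqueue.h>
 *
 *	struct mq_attr attr, old;
 *	mq_getattr(mqdes, &attr);	// mq_getsetattr(mqdes, NULL, &attr)
 *	attr.mq_flags = O_NONBLOCK;
 *	mq_setattr(mqdes, &attr, &old);	// old gets the previous attributes
 */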

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags		      */
	compat_long_t mq_maxmsg;     /* maximum number of messages	      */
	compat_long_t mq_msgsize;    /* maximum message size		      */
	compat_long_t mq_curmsgs;    /* number of messages currently queued  */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
				   struct timespec64 *ts)
{
	if (get_old_timespec32(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
		const char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int, msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
		char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.free_inode = mqueue_free_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
	.free		= mqueue_fs_context_free,
	.get_tree	= mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
	.name			= "mqueue",
	.init_fs_context	= mqueue_init_fs_context,
	.kill_sb		= kill_litter_super,
	.fs_flags		= FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	struct vfsmount *m;

	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default  = DFLT_MSGSIZE;

	m = mq_create_mount(ns);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ns->mq_mnt = m;
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);