msg.c revision f75a2f35
/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/capability.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <linux/uaccess.h>
#include "util.h"

/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};

#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4
#define SEARCH_NUMBER		5

#define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])

static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif

/*
 * Scale msgmni with the available lowmem size: the memory dedicated to msg
 * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
 * Also take into account the number of nsproxies created so far.
 * This should be done staying within the (MSGMNI, IPCMNI/nr_ipc_ns) range.
 */
void recompute_msgmni(struct ipc_namespace *ns)
{
	struct sysinfo i;
	unsigned long allowed;
	int nb_ns;

	si_meminfo(&i);
	allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
		/ MSGMNB;
	nb_ns = atomic_read(&nr_ipc_ns);
	allowed /= nb_ns;

	if (allowed < MSGMNI) {
		ns->msg_ctlmni = MSGMNI;
		return;
	}

	if (allowed > IPCMNI / nb_ns) {
		ns->msg_ctlmni = IPCMNI / nb_ns;
		return;
	}

	ns->msg_ctlmni = allowed;
}
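
/*
 * Worked example of the scaling above (illustrative figures only, assuming
 * the usual defaults MSG_MEM_SCALE == 32, MSGMNB == 16384, MSGMNI == 32 and
 * IPCMNI == 32768): with 1 GiB of lowmem and a single ipc namespace,
 * allowed = (1073741824 / 32) / 16384 = 2048, which lies between MSGMNI and
 * IPCMNI, so msg_ctlmni is set to 2048 queue identifiers.
 */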

void msg_init_ns(struct ipc_namespace *ns)
{
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;

	recompute_msgmni(ns);

	atomic_set(&ns->msg_bytes, 0);
	atomic_set(&ns->msg_hdrs, 0);
	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}

#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &msg_ids(ns), freeque);
	idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif

void __init msg_init(void)
{
	msg_init_ns(&init_ipc_ns);

	printk(KERN_INFO "msgmni has been set to %d\n",
		init_ipc_ns.msg_ctlmni);

	ipc_init_proc_interface("sysvipc/msg",
				"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
				IPC_MSG_IDS, sysvipc_msg_proc_show);
}

static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}

static void msg_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct msg_queue *msq = ipc_rcu_to_struct(p);

	security_msg_queue_free(msq);
	ipc_rcu_free(head);
}

/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rwsem held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int id, retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq, ipc_rcu_free);
		return retval;
	}

	/* ipc_addid() locks msq upon success. */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id < 0) {
		ipc_rcu_putref(msq, msg_rcu_free);
		return id;
	}

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	ipc_unlock_object(&msq->q_perm);
	rcu_read_unlock();

	return msq->q_perm.id;
}

static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	__set_current_state(TASK_INTERRUPTIBLE);
	list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}

static void ss_wakeup(struct list_head *h, int kill)
{
	struct msg_sender *mss, *t;

	list_for_each_entry_safe(mss, t, h, list) {
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}
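
/*
 * Note on the kill flag above: freeque() passes kill == 1, which clears
 * list.next on each waiting sender before waking it.  The woken sender's
 * ss_del() sees the NULL marker and skips list_del(), so it never touches
 * the q_senders list of a queue that is being torn down.
 */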

static void expunge_all(struct msg_queue *msq, int res)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		msr->r_msg = NULL; /* initialize expunge ordering */
		wake_up_process(msr->r_tsk);
		/*
		 * Ensure that the wakeup is visible before setting r_msg as
		 * the receiving end depends on it: either spinning on a nil,
		 * or dealing with -EAGAIN cases. See lockless receive part 1
		 * and 2 in do_msgrcv().
		 */
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rwsem (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rwsem remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct msg_msg *msg, *t;
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msg_rmid(ns, msq);
	ipc_unlock_object(&msq->q_perm);
	rcu_read_unlock();

	list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
		atomic_dec(&ns->msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
	ipc_rcu_putref(msq, msg_rcu_free);
}

/*
 * Called with msg_ids.rwsem and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}

SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops msg_ops = {
		.getnew = newque,
		.associate = msg_security,
	};
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
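
/*
 * Illustrative userspace counterpart (a sketch, not part of this file),
 * assuming <sys/ipc.h> and <sys/msg.h>; the path passed to ftok() is
 * hypothetical:
 *
 *	key_t key = ftok("/some/existing/file", 'q');
 *	int id = msgget(key, IPC_CREAT | 0600);	// reaches newque() above
 *	if (id < 0)				// when the key is new
 *		perror("msgget");
 */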

static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime		= in->msg_stime;
		out.msg_rtime		= in->msg_rtime;
		out.msg_ctime		= in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes	= USHRT_MAX;
		else
			out.msg_cbytes	= in->msg_cbytes;
		out.msg_lcbytes		= in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum	= USHRT_MAX;
		else
			out.msg_qnum	= in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes	= USHRT_MAX;
		else
			out.msg_qbytes	= in->msg_qbytes;
		out.msg_lqbytes		= in->msg_qbytes;

		out.msg_lspid		= in->msg_lspid;
		out.msg_lrpid		= in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
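
/*
 * For example, a queue with q_cbytes == 70000 is reported through the
 * IPC_OLD layout as msg_cbytes == 65535 (clamped to USHRT_MAX) while
 * msg_lcbytes still carries the full value 70000; the IPC_64 layout is
 * copied out unchanged.
 */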

static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->msg_perm.uid	= tbuf_old.msg_perm.uid;
		out->msg_perm.gid	= tbuf_old.msg_perm.gid;
		out->msg_perm.mode	= tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->msg_qbytes	= tbuf_old.msg_lqbytes;
		else
			out->msg_qbytes	= tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some msgctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
		       struct msqid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct msqid64_ds uninitialized_var(msqid64);
	struct msg_queue *msq;
	int err;

	if (cmd == IPC_SET) {
		if (copy_msqid_from_user(&msqid64, buf, version))
			return -EFAULT;
	}

	down_write(&msg_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
				      &msqid64.msg_perm, msqid64.msg_qbytes);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	msq = container_of(ipcp, struct msg_queue, q_perm);

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&msq->q_perm);
		/* freeque unlocks the ipc object and rcu */
		freeque(ns, ipcp);
		goto out_up;
	case IPC_SET:
		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			goto out_unlock1;
		}

		ipc_lock_object(&msq->q_perm);
		err = ipc_update_perm(&msqid64.msg_perm, ipcp);
		if (err)
			goto out_unlock0;

		msq->q_qbytes = msqid64.msg_qbytes;

		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&msg_ids(ns).rwsem);
	return err;
}

static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct msg_queue *msq;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;

		/*
		 * We must not return kernel stack data:
		 * due to padding, it's not enough
		 * to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rwsem);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
			msginfo.msgtql = atomic_read(&ns->msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rwsem);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}

	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == MSG_STAT) {
			msq = msq_obtain_object(ns, msqid);
			if (IS_ERR(msq)) {
				err = PTR_ERR(msq);
				goto out_unlock;
			}
			success_return = msq->q_perm.id;
		} else {
			msq = msq_obtain_object_check(ns, msqid);
			if (IS_ERR(msq)) {
				err = PTR_ERR(msq);
				goto out_unlock;
			}
			success_return = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime  = msq->q_stime;
		tbuf.msg_rtime  = msq->q_rtime;
		tbuf.msg_ctime  = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum   = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid  = msq->q_lspid;
		tbuf.msg_lrpid  = msq->q_lrpid;
		rcu_read_unlock();

		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}

	default:
		return -EINVAL;
	}

	return err;
out_unlock:
	rcu_read_unlock();
	return err;
}

SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
	int version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
		return msgctl_nolock(ns, msqid, cmd, version, buf);
	case IPC_SET:
	case IPC_RMID:
		return msgctl_down(ns, msqid, cmd, buf, version);
	default:
		return  -EINVAL;
	}
}
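
/*
 * Illustrative userspace usage of the commands routed above (a sketch,
 * assuming <sys/msg.h>; "id" is a hypothetical queue identifier):
 *
 *	struct msqid_ds ds;
 *	msgctl(id, IPC_STAT, &ds);	// handled by msgctl_nolock()
 *	ds.msg_qbytes = 32768;		// raising above msg_ctlmnb needs
 *	msgctl(id, IPC_SET, &ds);	//   CAP_SYS_RESOURCE, see msgctl_down()
 *	msgctl(id, IPC_RMID, NULL);	// handled by msgctl_down()/freeque()
 */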

static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch (mode) {
	case SEARCH_ANY:
	case SEARCH_NUMBER:
		return 1;
	case SEARCH_LESSEQUAL:
		if (msg->m_type <= type)
			return 1;
		break;
	case SEARCH_EQUAL:
		if (msg->m_type == type)
			return 1;
		break;
	case SEARCH_NOTEQUAL:
		if (msg->m_type != type)
			return 1;
		break;
	}
	return 0;
}

static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				/* initialize pipelined send ordering */
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb(); /* see barrier comment below */
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				/*
				 * Ensure that the wakeup is visible before
				 * setting r_msg, as the receiving end depends
				 * on it. See lockless receive part 1 and 2 in
				 * do_msgrcv().
				 */
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}

	return 0;
}
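
/*
 * The hand-off protocol used above (and in expunge_all()): r_msg is first
 * set to NULL, the receiver is woken, a full barrier is issued, and only
 * then is the final value stored -- either the message itself or an
 * ERR_PTR such as -E2BIG.  The receiver in do_msgrcv() spins until r_msg
 * becomes non-NULL, so it can pick up the result without retaking the
 * queue lock on the common path.
 */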

long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	rcu_read_lock();
	msq = msq_obtain_object_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_unlock1;
	}

	ipc_lock_object(&msq->q_perm);

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IWUGO))
			goto out_unlock0;

		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock0;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock0;
		}

		/* enqueue the sender and prepare to block */
		ss_add(msq, &s);

		if (!ipc_rcu_getref(msq)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		ipc_unlock_object(&msq->q_perm);
		rcu_read_unlock();
		schedule();

		rcu_read_lock();
		ipc_lock_object(&msq->q_perm);

		ipc_rcu_putref(msq, ipc_rcu_free);
		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock0;
		}

	}
	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
	if (msg != NULL)
		free_msg(msg);
	return err;
}

SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		int, msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
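
/*
 * Illustrative userspace counterpart (a sketch, assuming <sys/msg.h>; the
 * buffer layout is the standard long-plus-text one, the text size is
 * arbitrary, and "id" is a hypothetical queue identifier):
 *
 *	struct { long mtype; char mtext[64]; } m = { 1, "hello" };
 *	if (msgsnd(id, &m, sizeof(m.mtext), 0) < 0)	// msgsz excludes mtype
 *		perror("msgsnd");
 *
 * Passing IPC_NOWAIT as the last argument takes the -EAGAIN path above
 * instead of blocking when the queue is full.
 */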

static inline int convert_mode(long *msgtyp, int msgflg)
{
	if (msgflg & MSG_COPY)
		return SEARCH_NUMBER;
	/*
	 *  find message of correct type.
	 *  msgtyp = 0 => get first.
	 *  msgtyp > 0 => get first message of matching type.
	 *  msgtyp < 0 => get message with lowest type that is <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}
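
/*
 * Examples of the mapping performed above for a plain msgrcv() call:
 * msgtyp == 0 selects the first message (SEARCH_ANY); msgtyp == 5 selects
 * the first message of type 5 (SEARCH_EQUAL), or the first message of any
 * other type if MSG_EXCEPT is set (SEARCH_NOTEQUAL); msgtyp == -5 selects
 * the message with the lowest type <= 5 (SEARCH_LESSEQUAL).
 */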

static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
{
	struct msgbuf __user *msgp = dest;
	size_t msgsz;

	if (put_user(msg->m_type, &msgp->mtype))
		return -EFAULT;

	msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
	if (store_msg(msgp->mtext, msg, msgsz))
		return -EFAULT;
	return msgsz;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
/*
 * This function creates a new kernel message structure, large enough to store
 * bufsz message bytes.
 */
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	struct msg_msg *copy;

	/*
	 * Create dummy message to copy real message to.
	 */
	copy = load_msg(buf, bufsz);
	if (!IS_ERR(copy))
		copy->m_ts = bufsz;
	return copy;
}

static inline void free_copy(struct msg_msg *copy)
{
	if (copy)
		free_msg(copy);
}
#else
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	return ERR_PTR(-ENOSYS);
}

static inline void free_copy(struct msg_msg *copy)
{
}
#endif

static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
{
	struct msg_msg *msg, *found = NULL;
	long count = 0;

	list_for_each_entry(msg, &msq->q_messages, m_list) {
		if (testmsg(msg, *msgtyp, mode) &&
		    !security_msg_queue_msgrcv(msq, msg, current,
					       *msgtyp, mode)) {
			if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
				*msgtyp = msg->m_type - 1;
				found = msg;
			} else if (mode == SEARCH_NUMBER) {
				if (*msgtyp == count)
					return msg;
			} else
				return msg;
			count++;
		}
	}

	return found ?: ERR_PTR(-EAGAIN);
}

long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,
	       long (*msg_handler)(void __user *, struct msg_msg *, size_t))
{
	int mode;
	struct msg_queue *msq;
	struct ipc_namespace *ns;
	struct msg_msg *msg, *copy = NULL;

	ns = current->nsproxy->ipc_ns;

	if (msqid < 0 || (long) bufsz < 0)
		return -EINVAL;

	if (msgflg & MSG_COPY) {
		if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
			return -EINVAL;
		copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
		if (IS_ERR(copy))
			return PTR_ERR(copy);
	}
	mode = convert_mode(&msgtyp, msgflg);

	rcu_read_lock();
	msq = msq_obtain_object_check(ns, msqid);
	if (IS_ERR(msq)) {
		rcu_read_unlock();
		free_copy(copy);
		return PTR_ERR(msq);
	}

	for (;;) {
		struct msg_receiver msr_d;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock1;

		ipc_lock_object(&msq->q_perm);

		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			msg = ERR_PTR(-EIDRM);
			goto out_unlock0;
		}

		msg = find_msg(msq, &msgtyp, mode);
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock0;
			}
			/*
			 * If we are copying, then do not unlink message and do
			 * not update queue parameters.
			 */
			if (msgflg & MSG_COPY) {
				msg = copy_msg(msg, copy);
				goto out_unlock0;
			}

			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);

			goto out_unlock0;
		}

		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock0;
		}

		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = bufsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		__set_current_state(TASK_INTERRUPTIBLE);

		ipc_unlock_object(&msq->q_perm);
		rcu_read_unlock();
		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption.  We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and acquiring the q_perm.lock in ipc_lock_object().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock1;

		/* Lockless receive, part 4:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_object(&msq->q_perm);

		/* Lockless receive, part 5:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock0;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
			goto out_unlock0;
		}

		ipc_unlock_object(&msq->q_perm);
	}

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
	if (IS_ERR(msg)) {
		free_copy(copy);
		return PTR_ERR(msg);
	}

	bufsz = msg_handler(buf, msg, bufsz);
	free_msg(msg);

	return bufsz;
}

SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		long, msgtyp, int, msgflg)
{
	return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
}
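
/*
 * Illustrative userspace counterpart (a sketch, assuming <sys/msg.h>; the
 * buffer layout matches the msgsnd() example earlier in this file and "id"
 * is a hypothetical queue identifier):
 *
 *	struct { long mtype; char mtext[64]; } m;
 *	ssize_t n = msgrcv(id, &m, sizeof(m.mtext), -5, MSG_NOERROR);
 *	// receives the queued message with the lowest type <= 5, truncating
 *	// it to 64 bytes instead of failing with E2BIG if it is larger
 */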

#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct msg_queue *msq = it;

	return seq_printf(s,
			"%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_perm.id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			from_kuid_munged(user_ns, msq->q_perm.uid),
			from_kgid_munged(user_ns, msq->q_perm.gid),
			from_kuid_munged(user_ns, msq->q_perm.cuid),
			from_kgid_munged(user_ns, msq->q_perm.cgid),
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif
