/*	$NetBSD: sysv_msg.c,v 1.76 2019/10/04 23:20:22 kamil Exp $	*/

/*-
 * Copyright (c) 1999, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implementation of SVID messages
 *
 * Author: Daniel Boulet
 *
 * Copyright 1993 Daniel Boulet and RTMX Inc.
 *
 * This system call was implemented by Daniel Boulet under contract from RTMX.
 *
 * Redistribution and use in source forms, with and without modification,
 * are permitted provided that this entire comment appears intact.
 *
 * Redistribution in binary form may occur without any restrictions.
 * Obviously, it would be nice if you gave credit where credit is due
 * but requiring it would be too onerous.
 *
 * This software is provided ``AS IS'' without any warranties of any kind.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_msg.c,v 1.76 2019/10/04 23:20:22 kamil Exp $");

#ifdef _KERNEL_OPT
#include "opt_sysv.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/msg.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/syscallargs.h>
#include <sys/kauth.h>

#define MSG_DEBUG
#undef MSG_DEBUG_OK

#ifdef MSG_DEBUG_OK
#define MSG_PRINTF(a)	printf a
#else
#define MSG_PRINTF(a)
#endif

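/*
 * Message data lives in msgpool, an array of msginfo.msgseg fixed-size
 * segments of msginfo.msgssz bytes each.  Each segment is described by an
 * entry in msgmaps[]; the segments of a message, and the free pool, are
 * chained together through the 'next' indices of those entries.  Message
 * headers and queue identifiers are likewise kept in preallocated arrays
 * with their own free lists.  All of it is protected by msgmutex.
 */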
static int	nfree_msgmaps;		/* # of free map entries */
static short	free_msgmaps;	/* head of linked list of free map entries */
static struct	__msg *free_msghdrs;	/* list of free msg headers */
static char	*msgpool;		/* MSGMAX byte long msg buffer pool */
static struct	msgmap *msgmaps;	/* MSGSEG msgmap structures */
static struct __msg *msghdrs;		/* MSGTQL msg headers */

kmsq_t	*msqs;				/* MSGMNI msqid_ds struct's */
kmutex_t msgmutex;			/* subsystem lock */

static u_int	msg_waiters = 0;	/* total number of msgrcv waiters */
static bool	msg_realloc_state;
static kcondvar_t msg_realloc_cv;

static void msg_freehdr(struct __msg *);

extern int kern_has_sysvmsg;

SYSCTL_SETUP_PROTO(sysctl_ipc_msg_setup);

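/*
 * Initialize the System V message facility: validate the tunables in
 * msginfo, allocate one wired region holding the segment pool, segment
 * map, message headers and queue identifiers, and build the free lists.
 */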
int
msginit(void)
{
	int i, sz;
	vaddr_t v;

	/*
	 * msginfo.msgssz should be a power of two for efficiency reasons.
	 * It is also pretty silly if msginfo.msgssz is less than 8
	 * or greater than about 256 so ...
	 */

	i = 8;
	while (i < 1024 && i != msginfo.msgssz)
		i <<= 1;
	if (i != msginfo.msgssz) {
		printf("msginfo.msgssz = %d, not a small power of 2",
		    msginfo.msgssz);
		return EINVAL;
	}

	if (msginfo.msgseg > 32767) {
		printf("msginfo.msgseg = %d > 32767", msginfo.msgseg);
		return EINVAL;
	}

	/* Allocate the wired memory for our structures */
	sz = ALIGN(msginfo.msgmax) +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
	sz = round_page(sz);
	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0) {
		printf("sysv_msg: cannot allocate memory");
		return ENOMEM;
	}
	msgpool = (void *)v;
	msgmaps = (void *)((uintptr_t)msgpool + ALIGN(msginfo.msgmax));
	msghdrs = (void *)((uintptr_t)msgmaps +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)));
	msqs = (void *)((uintptr_t)msghdrs +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)));

	for (i = 0; i < (msginfo.msgseg - 1); i++)
		msgmaps[i].next = i + 1;
	msgmaps[msginfo.msgseg - 1].next = -1;

	free_msgmaps = 0;
	nfree_msgmaps = msginfo.msgseg;

	for (i = 0; i < (msginfo.msgtql - 1); i++) {
		msghdrs[i].msg_type = 0;
		msghdrs[i].msg_next = &msghdrs[i + 1];
	}
	i = msginfo.msgtql - 1;
	msghdrs[i].msg_type = 0;
	msghdrs[i].msg_next = NULL;
	free_msghdrs = &msghdrs[0];

	for (i = 0; i < msginfo.msgmni; i++) {
		cv_init(&msqs[i].msq_cv, "msgwait");
		/* Implies entry is available */
		msqs[i].msq_u.msg_qbytes = 0;
		/* Reset to a known value */
		msqs[i].msq_u.msg_perm._seq = 0;
	}

	mutex_init(&msgmutex, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&msg_realloc_cv, "msgrealc");
	msg_realloc_state = false;

	kern_has_sysvmsg = 1;

	return 0;
}

int
msgfini(void)
{
	int i, sz;
	vaddr_t v = (vaddr_t)msgpool;

	mutex_enter(&msgmutex);
	for (i = 0; i < msginfo.msgmni; i++) {
		if (msqs[i].msq_u.msg_qbytes != 0) {
			mutex_exit(&msgmutex);
			return 1; /* a queue is still in use; prevent unload! */
		}
	}
	/*
	 * Destroy all condvars and free the memory we're using
	 */
	for (i = 0; i < msginfo.msgmni; i++) {
		cv_destroy(&msqs[i].msq_cv);
	}
	sz = ALIGN(msginfo.msgmax) +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
	sz = round_page(sz);
	uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);

	cv_destroy(&msg_realloc_cv);
	mutex_exit(&msgmutex);
	mutex_destroy(&msgmutex);

	kern_has_sysvmsg = 0;

	return 0;
}

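/*
 * Resize the facility to newmsgmni queue identifiers and newmsgseg
 * segments.  A new wired region is allocated, all msgsnd/msgrcv waiters
 * are drained, every active queue is copied over (defragmenting its
 * segments in the process), the global pointers are switched and the old
 * region is freed.  Fails with EBUSY if the current contents do not fit.
 */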
static int
msgrealloc(int newmsgmni, int newmsgseg)
{
	struct msgmap *new_msgmaps;
	struct __msg *new_msghdrs, *new_free_msghdrs;
	char *old_msgpool, *new_msgpool;
	kmsq_t *new_msqs;
	vaddr_t v;
	int i, sz, msqid, newmsgmax, new_nfree_msgmaps;
	short new_free_msgmaps;

	if (newmsgmni < 1 || newmsgseg < 1)
		return EINVAL;

	/* Allocate the wired memory for our structures */
	newmsgmax = msginfo.msgssz * newmsgseg;
	sz = ALIGN(newmsgmax) +
	    ALIGN(newmsgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(newmsgmni * sizeof(kmsq_t));
	sz = round_page(sz);
	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		return ENOMEM;

	mutex_enter(&msgmutex);
	if (msg_realloc_state) {
		mutex_exit(&msgmutex);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}
	msg_realloc_state = true;
	if (msg_waiters) {
		/*
		 * Reallocation state is set: wake up all waiters
		 * and wait until they have all exited.
		 */
		for (i = 0; i < msginfo.msgmni; i++)
			cv_broadcast(&msqs[i].msq_cv);
		while (msg_waiters)
			cv_wait(&msg_realloc_cv, &msgmutex);
	}
	old_msgpool = msgpool;

	/* We cannot reallocate less memory than we use */
	i = 0;
	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
		struct msqid_ds *mptr;
		kmsq_t *msq;

		msq = &msqs[msqid];
		mptr = &msq->msq_u;
		if (mptr->msg_qbytes || (mptr->msg_perm.mode & MSG_LOCKED))
			i = msqid;
	}
	if (i >= newmsgmni || (msginfo.msgseg - nfree_msgmaps) > newmsgseg) {
		mutex_exit(&msgmutex);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}

	new_msgpool = (void *)v;
	new_msgmaps = (void *)((uintptr_t)new_msgpool + ALIGN(newmsgmax));
	new_msghdrs = (void *)((uintptr_t)new_msgmaps +
	    ALIGN(newmsgseg * sizeof(struct msgmap)));
	new_msqs = (void *)((uintptr_t)new_msghdrs +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)));

	/* Initialize the structures */
	for (i = 0; i < (newmsgseg - 1); i++)
		new_msgmaps[i].next = i + 1;
	new_msgmaps[newmsgseg - 1].next = -1;
	new_free_msgmaps = 0;
	new_nfree_msgmaps = newmsgseg;

	for (i = 0; i < (msginfo.msgtql - 1); i++) {
		new_msghdrs[i].msg_type = 0;
		new_msghdrs[i].msg_next = &new_msghdrs[i + 1];
	}
	i = msginfo.msgtql - 1;
	new_msghdrs[i].msg_type = 0;
	new_msghdrs[i].msg_next = NULL;
	new_free_msghdrs = &new_msghdrs[0];

	for (i = 0; i < newmsgmni; i++) {
		new_msqs[i].msq_u.msg_qbytes = 0;
		new_msqs[i].msq_u.msg_perm._seq = 0;
		cv_init(&new_msqs[i].msq_cv, "msgwait");
	}

	/*
	 * Copy all message queue identifiers, message headers and buffer
	 * pools to the new memory location.
	 */
	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
		struct __msg *nmsghdr, *msghdr, *pmsghdr;
		struct msqid_ds *nmptr, *mptr;
		kmsq_t *nmsq, *msq;

		msq = &msqs[msqid];
		mptr = &msq->msq_u;

		if (mptr->msg_qbytes == 0 &&
		    (mptr->msg_perm.mode & MSG_LOCKED) == 0)
			continue;

		nmsq = &new_msqs[msqid];
		nmptr = &nmsq->msq_u;
		memcpy(nmptr, mptr, sizeof(struct msqid_ds));

		/*
		 * Walk the message headers, copying each message into a
		 * header taken from the new free list; its segments are
		 * reallocated as we go, which defragments the pool.
		 */
		nmsghdr = pmsghdr = NULL;
		msghdr = mptr->_msg_first;
		while (msghdr) {
			short nnext = 0, next;
			u_short msgsz, segcnt;

			/* Take an entry from the new list of free msghdrs */
			nmsghdr = new_free_msghdrs;
			KASSERT(nmsghdr != NULL);
			new_free_msghdrs = nmsghdr->msg_next;

			nmsghdr->msg_next = NULL;
			if (pmsghdr) {
				pmsghdr->msg_next = nmsghdr;
			} else {
				nmptr->_msg_first = nmsghdr;
			}
			pmsghdr = nmsghdr;
			nmsghdr->msg_ts = msghdr->msg_ts;
			nmsghdr->msg_spot = -1;

			/* Compute the amount of segments and reserve them */
			msgsz = msghdr->msg_ts;
			segcnt = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
			if (segcnt == 0) {
				/* Zero-length message: no segments to copy */
				msghdr = msghdr->msg_next;
				continue;
			}
			while (segcnt--) {
				nnext = new_free_msgmaps;
				new_free_msgmaps = new_msgmaps[nnext].next;
				new_nfree_msgmaps--;
				new_msgmaps[nnext].next = nmsghdr->msg_spot;
				nmsghdr->msg_spot = nnext;
			}

			/* Copy all segments */
			KASSERT(nnext == nmsghdr->msg_spot);
			next = msghdr->msg_spot;
			while (msgsz > 0) {
				size_t tlen;

				if (msgsz >= msginfo.msgssz) {
					tlen = msginfo.msgssz;
					msgsz -= msginfo.msgssz;
				} else {
					tlen = msgsz;
					msgsz = 0;
				}

				/* Copy the message buffer */
				memcpy(&new_msgpool[nnext * msginfo.msgssz],
				    &msgpool[next * msginfo.msgssz], tlen);

				/* Next entry of each map */
				nnext = new_msgmaps[nnext].next;
				next = msgmaps[next].next;
			}

			/* Next message header */
			msghdr = msghdr->msg_next;
		}
		nmptr->_msg_last = nmsghdr;
	}
	KASSERT((msginfo.msgseg - nfree_msgmaps) ==
	    (newmsgseg - new_nfree_msgmaps));

	sz = ALIGN(msginfo.msgmax) +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
	sz = round_page(sz);

	for (i = 0; i < msginfo.msgmni; i++)
		cv_destroy(&msqs[i].msq_cv);

	/* Set the pointers and update the new values */
	msgpool = new_msgpool;
	msgmaps = new_msgmaps;
	msghdrs = new_msghdrs;
	msqs = new_msqs;

	free_msghdrs = new_free_msghdrs;
	free_msgmaps = new_free_msgmaps;
	nfree_msgmaps = new_nfree_msgmaps;
	msginfo.msgmni = newmsgmni;
	msginfo.msgseg = newmsgseg;
	msginfo.msgmax = newmsgmax;

	/* Reallocation completed - notify all waiters, if any */
	msg_realloc_state = false;
	cv_broadcast(&msg_realloc_cv);
	mutex_exit(&msgmutex);

	uvm_km_free(kernel_map, (vaddr_t)old_msgpool, sz, UVM_KMF_WIRED);
	return 0;
}

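/*
 * Return all segments of a message to the free segment list and put the
 * header back on the free header list.  Called with msgmutex held.
 */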
static void
msg_freehdr(struct __msg *msghdr)
{

	KASSERT(mutex_owned(&msgmutex));

	while (msghdr->msg_ts > 0) {
		short next;
		KASSERT(msghdr->msg_spot >= 0);
		KASSERT(msghdr->msg_spot < msginfo.msgseg);

		next = msgmaps[msghdr->msg_spot].next;
		msgmaps[msghdr->msg_spot].next = free_msgmaps;
		free_msgmaps = msghdr->msg_spot;
		nfree_msgmaps++;
		msghdr->msg_spot = next;
		if (msghdr->msg_ts >= msginfo.msgssz)
			msghdr->msg_ts -= msginfo.msgssz;
		else
			msghdr->msg_ts = 0;
	}
	KASSERT(msghdr->msg_spot == -1);
	msghdr->msg_next = free_msghdrs;
	free_msghdrs = msghdr;
}

int
sys___msgctl50(struct lwp *l, const struct sys___msgctl50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) msqid;
		syscallarg(int) cmd;
		syscallarg(struct msqid_ds *) buf;
	} */
	struct msqid_ds msqbuf;
	int cmd, error;

	cmd = SCARG(uap, cmd);

	if (cmd == IPC_SET) {
		error = copyin(SCARG(uap, buf), &msqbuf, sizeof(msqbuf));
		if (error)
			return (error);
	}

	error = msgctl1(l, SCARG(uap, msqid), cmd,
	    (cmd == IPC_SET || cmd == IPC_STAT) ? &msqbuf : NULL);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&msqbuf, SCARG(uap, buf), sizeof(msqbuf));

	return (error);
}

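/*
 * Common guts of msgctl(): msqbuf is the kernel copy of the user's
 * msqid_ds, used as input for IPC_SET and as output for IPC_STAT.
 */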
int
msgctl1(struct lwp *l, int msqid, int cmd, struct msqid_ds *msqbuf)
{
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr;
	kmsq_t *msq;
	int error = 0, ix;

	MSG_PRINTF(("call to msgctl1(%d, %d)\n", msqid, cmd));

	ix = IPCID_TO_IX(msqid);

	mutex_enter(&msgmutex);

	if (ix < 0 || ix >= msginfo.msgmni) {
		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", ix,
		    msginfo.msgmni));
		error = EINVAL;
		goto unlock;
	}

	msq = &msqs[ix];
	msqptr = &msq->msq_u;

	if (msqptr->msg_qbytes == 0) {
		MSG_PRINTF(("no such msqid\n"));
		error = EINVAL;
		goto unlock;
	}
	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqid)) {
		MSG_PRINTF(("wrong sequence number\n"));
		error = EINVAL;
		goto unlock;
	}

	switch (cmd) {
	case IPC_RMID:
	{
		struct __msg *msghdr;
		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)) != 0)
			break;
		/* Free the message headers */
		msghdr = msqptr->_msg_first;
		while (msghdr != NULL) {
			struct __msg *msghdr_tmp;

			/* Free the segments of each message */
			msqptr->_msg_cbytes -= msghdr->msg_ts;
			msqptr->msg_qnum--;
			msghdr_tmp = msghdr;
			msghdr = msghdr->msg_next;
			msg_freehdr(msghdr_tmp);
		}
		KASSERT(msqptr->_msg_cbytes == 0);
		KASSERT(msqptr->msg_qnum == 0);

		/* Mark it as free */
		msqptr->msg_qbytes = 0;
		cv_broadcast(&msq->msq_cv);
	}
		break;

	case IPC_SET:
		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)))
			break;
		if (msqbuf->msg_qbytes > msqptr->msg_qbytes &&
		    kauth_authorize_system(cred, KAUTH_SYSTEM_SYSVIPC,
		    KAUTH_REQ_SYSTEM_SYSVIPC_MSGQ_OVERSIZE,
		    KAUTH_ARG(msqbuf->msg_qbytes),
		    KAUTH_ARG(msqptr->msg_qbytes), NULL) != 0) {
			error = EPERM;
			break;
		}
		if (msqbuf->msg_qbytes > msginfo.msgmnb) {
			MSG_PRINTF(("can't increase msg_qbytes beyond %d "
			    "(truncating)\n", msginfo.msgmnb));
			/* silently restrict qbytes to system limit */
			msqbuf->msg_qbytes = msginfo.msgmnb;
		}
		if (msqbuf->msg_qbytes == 0) {
			MSG_PRINTF(("can't reduce msg_qbytes to 0\n"));
			error = EINVAL;		/* XXX non-standard errno! */
			break;
		}
		msqptr->msg_perm.uid = msqbuf->msg_perm.uid;
		msqptr->msg_perm.gid = msqbuf->msg_perm.gid;
		msqptr->msg_perm.mode = (msqptr->msg_perm.mode & ~0777) |
		    (msqbuf->msg_perm.mode & 0777);
		msqptr->msg_qbytes = msqbuf->msg_qbytes;
		msqptr->msg_ctime = time_second;
		break;

	case IPC_STAT:
		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
			MSG_PRINTF(("requester doesn't have read access\n"));
			break;
		}
		memset(msqbuf, 0, sizeof *msqbuf);
		msqbuf->msg_perm = msqptr->msg_perm;
		msqbuf->msg_perm.mode &= 0777;
		msqbuf->msg_qnum = msqptr->msg_qnum;
		msqbuf->msg_qbytes = msqptr->msg_qbytes;
		msqbuf->msg_lspid = msqptr->msg_lspid;
		msqbuf->msg_lrpid = msqptr->msg_lrpid;
		msqbuf->msg_stime = msqptr->msg_stime;
		msqbuf->msg_rtime = msqptr->msg_rtime;
		msqbuf->msg_ctime = msqptr->msg_ctime;
		break;

	default:
		MSG_PRINTF(("invalid command %d\n", cmd));
		error = EINVAL;
		break;
	}

unlock:
	mutex_exit(&msgmutex);
	return (error);
}

int
sys_msgget(struct lwp *l, const struct sys_msgget_args *uap, register_t *retval)
{
	/* {
		syscallarg(key_t) key;
		syscallarg(int) msgflg;
	} */
	int msqid, error = 0;
	int key = SCARG(uap, key);
	int msgflg = SCARG(uap, msgflg);
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr = NULL;
	kmsq_t *msq;

	mutex_enter(&msgmutex);

	MSG_PRINTF(("msgget(0x%x, 0%o)\n", key, msgflg));

	if (key != IPC_PRIVATE) {
		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
			msq = &msqs[msqid];
			msqptr = &msq->msq_u;
			if (msqptr->msg_qbytes != 0 &&
			    msqptr->msg_perm._key == key)
				break;
		}
		if (msqid < msginfo.msgmni) {
			MSG_PRINTF(("found public key\n"));
			if ((msgflg & IPC_CREAT) && (msgflg & IPC_EXCL)) {
				MSG_PRINTF(("not exclusive\n"));
				error = EEXIST;
				goto unlock;
			}
			if ((error = ipcperm(cred, &msqptr->msg_perm,
			    msgflg & 0700 ))) {
				MSG_PRINTF(("requester doesn't have 0%o access\n",
				    msgflg & 0700));
				goto unlock;
			}
			goto found;
		}
	}

	MSG_PRINTF(("need to allocate the msqid_ds\n"));
	if (key == IPC_PRIVATE || (msgflg & IPC_CREAT)) {
		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
			/*
			 * Look for an unallocated and unlocked msqid_ds.
			 * msqid_ds's can be locked by msgsnd or msgrcv while
			 * they are copying the message in/out.  We can't
			 * re-use the entry until they release it.
			 */
			msq = &msqs[msqid];
			msqptr = &msq->msq_u;
			if (msqptr->msg_qbytes == 0 &&
			    (msqptr->msg_perm.mode & MSG_LOCKED) == 0)
				break;
		}
		if (msqid == msginfo.msgmni) {
			MSG_PRINTF(("no more msqid_ds's available\n"));
			error = ENOSPC;
			goto unlock;
		}
		MSG_PRINTF(("msqid %d is available\n", msqid));
		msqptr->msg_perm._key = key;
		msqptr->msg_perm.cuid = kauth_cred_geteuid(cred);
		msqptr->msg_perm.uid = kauth_cred_geteuid(cred);
		msqptr->msg_perm.cgid = kauth_cred_getegid(cred);
		msqptr->msg_perm.gid = kauth_cred_getegid(cred);
		msqptr->msg_perm.mode = (msgflg & 0777);
		/* Make sure that the returned msqid is unique */
		msqptr->msg_perm._seq++;
		msqptr->_msg_first = NULL;
		msqptr->_msg_last = NULL;
		msqptr->_msg_cbytes = 0;
		msqptr->msg_qnum = 0;
		msqptr->msg_qbytes = msginfo.msgmnb;
		msqptr->msg_lspid = 0;
		msqptr->msg_lrpid = 0;
		msqptr->msg_stime = 0;
		msqptr->msg_rtime = 0;
		msqptr->msg_ctime = time_second;
	} else {
		MSG_PRINTF(("didn't find it and wasn't asked to create it\n"));
		error = ENOENT;
		goto unlock;
	}

found:
	/* Construct the unique msqid */
	*retval = IXSEQ_TO_IPCID(msqid, msqptr->msg_perm);

unlock:
	mutex_exit(&msgmutex);
	return (error);
}

int
sys_msgsnd(struct lwp *l, const struct sys_msgsnd_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) msqid;
		syscallarg(const void *) msgp;
		syscallarg(size_t) msgsz;
		syscallarg(int) msgflg;
	} */

	return msgsnd1(l, SCARG(uap, msqid), SCARG(uap, msgp),
	    SCARG(uap, msgsz), SCARG(uap, msgflg), sizeof(long), copyin);
}

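/*
 * Common guts of msgsnd(): fetch_type copies the message type of typesz
 * bytes from userspace (the native syscall passes sizeof(long) and
 * copyin), allowing callers with a differently sized type field.
 */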
int
msgsnd1(struct lwp *l, int msqidr, const char *user_msgp, size_t msgsz,
    int msgflg, size_t typesz, copyin_t fetch_type)
{
	int segs_needed, error = 0, msqid;
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr;
	struct __msg *msghdr;
	kmsq_t *msq;
	short next;

	MSG_PRINTF(("call to msgsnd(%d, %p, %lld, %d)\n", msqidr,
	     user_msgp, (long long)msgsz, msgflg));

	if ((ssize_t)msgsz < 0)
		return EINVAL;

restart:
	msqid = IPCID_TO_IX(msqidr);

	mutex_enter(&msgmutex);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(msg_realloc_state))
		cv_wait(&msg_realloc_cv, &msgmutex);

	if (msqid < 0 || msqid >= msginfo.msgmni) {
		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
		    msginfo.msgmni));
		error = EINVAL;
		goto unlock;
	}

	msq = &msqs[msqid];
	msqptr = &msq->msq_u;

	if (msqptr->msg_qbytes == 0) {
		MSG_PRINTF(("no such message queue id\n"));
		error = EINVAL;
		goto unlock;
	}
	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
		MSG_PRINTF(("wrong sequence number\n"));
		error = EINVAL;
		goto unlock;
	}

	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_W))) {
		MSG_PRINTF(("requester doesn't have write access\n"));
		goto unlock;
	}

	segs_needed = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
	MSG_PRINTF(("msgsz=%lld, msgssz=%d, segs_needed=%d\n",
	    (long long)msgsz, msginfo.msgssz, segs_needed));
	for (;;) {
		int need_more_resources = 0;

		/*
		 * check msgsz [cannot be negative since it is unsigned]
		 * (inside this loop in case msg_qbytes changes while we sleep)
		 */

		if (msgsz > msqptr->msg_qbytes) {
			MSG_PRINTF(("msgsz > msqptr->msg_qbytes\n"));
			error = EINVAL;
			goto unlock;
		}

		if (msqptr->msg_perm.mode & MSG_LOCKED) {
			MSG_PRINTF(("msqid is locked\n"));
			need_more_resources = 1;
		}
		if (msgsz + msqptr->_msg_cbytes > msqptr->msg_qbytes) {
			MSG_PRINTF(("msgsz + msg_cbytes > msg_qbytes\n"));
			need_more_resources = 1;
		}
		if (segs_needed > nfree_msgmaps) {
			MSG_PRINTF(("segs_needed > nfree_msgmaps\n"));
			need_more_resources = 1;
		}
		if (free_msghdrs == NULL) {
			MSG_PRINTF(("no more msghdrs\n"));
			need_more_resources = 1;
		}

		if (need_more_resources) {
			int we_own_it;

			if ((msgflg & IPC_NOWAIT) != 0) {
				MSG_PRINTF(("need more resources but caller "
				    "doesn't want to wait\n"));
				error = EAGAIN;
				goto unlock;
			}

			if ((msqptr->msg_perm.mode & MSG_LOCKED) != 0) {
				MSG_PRINTF(("we don't own the msqid_ds\n"));
				we_own_it = 0;
			} else {
				/* Force later arrivals to wait for our
				   request */
				MSG_PRINTF(("we own the msqid_ds\n"));
				msqptr->msg_perm.mode |= MSG_LOCKED;
				we_own_it = 1;
			}

			msg_waiters++;
			MSG_PRINTF(("goodnight\n"));
			error = cv_wait_sig(&msq->msq_cv, &msgmutex);
			MSG_PRINTF(("good morning, error=%d\n", error));
			msg_waiters--;

			if (we_own_it)
				msqptr->msg_perm.mode &= ~MSG_LOCKED;

			/*
			 * If reallocation started while we slept, notify
			 * the reallocator and restart the call.
			 */
			if (msg_realloc_state) {
				cv_broadcast(&msg_realloc_cv);
				mutex_exit(&msgmutex);
				goto restart;
			}

			if (error != 0) {
				MSG_PRINTF(("msgsnd: interrupted system "
				    "call\n"));
				error = EINTR;
				goto unlock;
			}

			/*
			 * Make sure that the msq queue still exists
			 */

			if (msqptr->msg_qbytes == 0) {
				MSG_PRINTF(("msqid deleted\n"));
				error = EIDRM;
				goto unlock;
			}
		} else {
			MSG_PRINTF(("got all the resources that we need\n"));
			break;
		}
	}

	/*
	 * We have the resources that we need.
	 * Make sure!
	 */

	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
	KASSERT(segs_needed <= nfree_msgmaps);
	KASSERT(msgsz + msqptr->_msg_cbytes <= msqptr->msg_qbytes);
	KASSERT(free_msghdrs != NULL);

	/*
	 * Re-lock the msqid_ds in case we page-fault when copying in the
	 * message
	 */

	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
	msqptr->msg_perm.mode |= MSG_LOCKED;

	/*
	 * Allocate a message header
	 */

	msghdr = free_msghdrs;
	free_msghdrs = msghdr->msg_next;
	msghdr->msg_spot = -1;
	msghdr->msg_ts = msgsz;

	/*
	 * Allocate space for the message
	 */

	while (segs_needed > 0) {
		KASSERT(nfree_msgmaps > 0);
		KASSERT(free_msgmaps != -1);
		KASSERT(free_msgmaps < msginfo.msgseg);

		next = free_msgmaps;
		MSG_PRINTF(("allocating segment %d to message\n", next));
		free_msgmaps = msgmaps[next].next;
		nfree_msgmaps--;
		msgmaps[next].next = msghdr->msg_spot;
		msghdr->msg_spot = next;
		segs_needed--;
	}

	/*
	 * Copy in the message type
	 */
	mutex_exit(&msgmutex);
	error = (*fetch_type)(user_msgp, &msghdr->msg_type, typesz);
	mutex_enter(&msgmutex);
	if (error != 0) {
		MSG_PRINTF(("error %d copying the message type\n", error));
		msg_freehdr(msghdr);
		msqptr->msg_perm.mode &= ~MSG_LOCKED;
		cv_broadcast(&msq->msq_cv);
		goto unlock;
	}
	user_msgp += typesz;

	/*
	 * Validate the message type
	 */

	if (msghdr->msg_type < 1) {
		msg_freehdr(msghdr);
		msqptr->msg_perm.mode &= ~MSG_LOCKED;
		cv_broadcast(&msq->msq_cv);
		MSG_PRINTF(("mtype (%ld) < 1\n", msghdr->msg_type));
		error = EINVAL;
		goto unlock;
	}

	/*
	 * Copy in the message body
	 */

	next = msghdr->msg_spot;
	while (msgsz > 0) {
		size_t tlen;
		KASSERT(next > -1);
		KASSERT(next < msginfo.msgseg);

		if (msgsz > msginfo.msgssz)
			tlen = msginfo.msgssz;
		else
			tlen = msgsz;
		mutex_exit(&msgmutex);
		error = copyin(user_msgp, &msgpool[next * msginfo.msgssz], tlen);
		mutex_enter(&msgmutex);
		if (error != 0) {
			MSG_PRINTF(("error %d copying in message segment\n",
			    error));
			msg_freehdr(msghdr);
			msqptr->msg_perm.mode &= ~MSG_LOCKED;
			cv_broadcast(&msq->msq_cv);
			goto unlock;
		}
		msgsz -= tlen;
		user_msgp += tlen;
		next = msgmaps[next].next;
	}
	KASSERT(next == -1);

	/*
	 * We've got the message.  Unlock the msqid_ds.
	 */

	msqptr->msg_perm.mode &= ~MSG_LOCKED;

	/*
	 * Make sure that the msqid_ds is still allocated.
	 */

	if (msqptr->msg_qbytes == 0) {
		msg_freehdr(msghdr);
		cv_broadcast(&msq->msq_cv);
		error = EIDRM;
		goto unlock;
	}

	/*
	 * Put the message into the queue
	 */

	if (msqptr->_msg_first == NULL) {
		msqptr->_msg_first = msghdr;
		msqptr->_msg_last = msghdr;
	} else {
		msqptr->_msg_last->msg_next = msghdr;
		msqptr->_msg_last = msghdr;
	}
	msqptr->_msg_last->msg_next = NULL;

	msqptr->_msg_cbytes += msghdr->msg_ts;
	msqptr->msg_qnum++;
	msqptr->msg_lspid = l->l_proc->p_pid;
	msqptr->msg_stime = time_second;

	cv_broadcast(&msq->msq_cv);

unlock:
	mutex_exit(&msgmutex);
	return error;
}

int
sys_msgrcv(struct lwp *l, const struct sys_msgrcv_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) msqid;
		syscallarg(void *) msgp;
		syscallarg(size_t) msgsz;
		syscallarg(long) msgtyp;
		syscallarg(int) msgflg;
	} */

	return msgrcv1(l, SCARG(uap, msqid), SCARG(uap, msgp),
	    SCARG(uap, msgsz), SCARG(uap, msgtyp), SCARG(uap, msgflg),
	    sizeof(long), copyout, retval);
}

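/*
 * Common guts of msgrcv(): put_type copies the message type of typesz
 * bytes back to userspace (the native syscall passes sizeof(long) and
 * copyout).  On success *retval is set to the number of body bytes
 * copied out.
 */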
int
msgrcv1(struct lwp *l, int msqidr, char *user_msgp, size_t msgsz, long msgtyp,
    int msgflg, size_t typesz, copyout_t put_type, register_t *retval)
{
	size_t len;
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr;
	struct __msg *msghdr;
	int error = 0, msqid;
	kmsq_t *msq;
	short next;

	MSG_PRINTF(("call to msgrcv(%d, %p, %lld, %ld, %d)\n", msqidr,
	    user_msgp, (long long)msgsz, msgtyp, msgflg));

	if ((ssize_t)msgsz < 0)
		return EINVAL;

restart:
	msqid = IPCID_TO_IX(msqidr);

	mutex_enter(&msgmutex);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(msg_realloc_state))
		cv_wait(&msg_realloc_cv, &msgmutex);

	if (msqid < 0 || msqid >= msginfo.msgmni) {
		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
		    msginfo.msgmni));
		error = EINVAL;
		goto unlock;
	}

	msq = &msqs[msqid];
	msqptr = &msq->msq_u;

	if (msqptr->msg_qbytes == 0) {
		MSG_PRINTF(("no such message queue id\n"));
		error = EINVAL;
		goto unlock;
	}
	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
		MSG_PRINTF(("wrong sequence number\n"));
		error = EINVAL;
		goto unlock;
	}

	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
		MSG_PRINTF(("requester doesn't have read access\n"));
		goto unlock;
	}

	msghdr = NULL;
	while (msghdr == NULL) {
		if (msgtyp == 0) {
			msghdr = msqptr->_msg_first;
			if (msghdr != NULL) {
				if (msgsz < msghdr->msg_ts &&
				    (msgflg & MSG_NOERROR) == 0) {
					MSG_PRINTF(("first msg on the queue "
					    "is too big (want %lld, got %d)\n",
					    (long long)msgsz, msghdr->msg_ts));
					error = E2BIG;
					goto unlock;
				}
				if (msqptr->_msg_first == msqptr->_msg_last) {
					msqptr->_msg_first = NULL;
					msqptr->_msg_last = NULL;
				} else {
					msqptr->_msg_first = msghdr->msg_next;
					KASSERT(msqptr->_msg_first != NULL);
				}
			}
		} else {
			struct __msg *previous;
			struct __msg **prev;

			for (previous = NULL, prev = &msqptr->_msg_first;
			     (msghdr = *prev) != NULL;
			     previous = msghdr, prev = &msghdr->msg_next) {
				/*
				 * Is this message's type an exact match or is
				 * this message's type less than or equal to
				 * the absolute value of a negative msgtyp?
				 * Note that the second half of this test can
				 * NEVER be true if msgtyp is positive since
				 * msg_type is always positive!
				 */

				if (msgtyp != msghdr->msg_type &&
				    msgtyp != LONG_MIN &&
				    msghdr->msg_type > -msgtyp)
					continue;

				MSG_PRINTF(("found message type %ld, requested %ld\n",
				    msghdr->msg_type, msgtyp));
				if (msgsz < msghdr->msg_ts &&
				     (msgflg & MSG_NOERROR) == 0) {
					MSG_PRINTF(("requested message on the queue "
					    "is too big (want %lld, got %d)\n",
					    (long long)msgsz, msghdr->msg_ts));
					error = E2BIG;
					goto unlock;
				}
				*prev = msghdr->msg_next;
				if (msghdr != msqptr->_msg_last)
					break;
				if (previous == NULL) {
					KASSERT(prev == &msqptr->_msg_first);
					msqptr->_msg_first = NULL;
					msqptr->_msg_last = NULL;
				} else {
					KASSERT(prev != &msqptr->_msg_first);
					msqptr->_msg_last = previous;
				}
				break;
			}
		}

		/*
		 * We've either extracted the msghdr for the appropriate
		 * message or there isn't one.
		 * If there is one then bail out of this loop.
		 */
		if (msghdr != NULL)
			break;

		/*
		 * Hmph!  No message found.  Does the user want to wait?
		 */

		if ((msgflg & IPC_NOWAIT) != 0) {
			MSG_PRINTF(("no appropriate message found (msgtyp=%ld)\n",
			    msgtyp));
			error = ENOMSG;
			goto unlock;
		}

		/*
		 * Wait for something to happen
		 */

		msg_waiters++;
		MSG_PRINTF(("msgrcv:  goodnight\n"));
		error = cv_wait_sig(&msq->msq_cv, &msgmutex);
		MSG_PRINTF(("msgrcv: good morning (error=%d)\n", error));
		msg_waiters--;

		/*
		 * If reallocation started while we slept, notify
		 * the reallocator and restart the call.
		 */
		if (msg_realloc_state) {
			cv_broadcast(&msg_realloc_cv);
			mutex_exit(&msgmutex);
			goto restart;
		}

		if (error != 0) {
			MSG_PRINTF(("msgrcv: interrupted system call\n"));
			error = EINTR;
			goto unlock;
		}

		/*
		 * Make sure that the msq queue still exists
		 */

		if (msqptr->msg_qbytes == 0 ||
		    msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
			MSG_PRINTF(("msqid deleted\n"));
			error = EIDRM;
			goto unlock;
		}
	}

	/*
	 * Return the message to the user.
	 *
	 * First, do the bookkeeping (before we risk being interrupted).
	 */

	msqptr->_msg_cbytes -= msghdr->msg_ts;
	msqptr->msg_qnum--;
	msqptr->msg_lrpid = l->l_proc->p_pid;
	msqptr->msg_rtime = time_second;

	/*
	 * Make msgsz the actual amount that we'll be returning.
	 * Note that this effectively truncates the message if it is too long
	 * (since msgsz is never increased).
	 */

	MSG_PRINTF(("found a message, msgsz=%lld, msg_ts=%d\n",
	    (long long)msgsz, msghdr->msg_ts));
	if (msgsz > msghdr->msg_ts)
		msgsz = msghdr->msg_ts;

	/*
	 * Return the type to the user.
	 */
	mutex_exit(&msgmutex);
	error = (*put_type)(&msghdr->msg_type, user_msgp, typesz);
	mutex_enter(&msgmutex);
	if (error != 0) {
		MSG_PRINTF(("error (%d) copying out message type\n", error));
		msg_freehdr(msghdr);
		cv_broadcast(&msq->msq_cv);
		goto unlock;
	}
	user_msgp += typesz;

	/*
	 * Return the segments to the user
	 */

	next = msghdr->msg_spot;
	for (len = 0; len < msgsz; len += msginfo.msgssz) {
		size_t tlen;
		KASSERT(next > -1);
		KASSERT(next < msginfo.msgseg);

		if (msgsz - len > msginfo.msgssz)
			tlen = msginfo.msgssz;
		else
			tlen = msgsz - len;
		mutex_exit(&msgmutex);
		error = copyout(&msgpool[next * msginfo.msgssz],
		    user_msgp, tlen);
		mutex_enter(&msgmutex);
		if (error != 0) {
			MSG_PRINTF(("error (%d) copying out message segment\n",
			    error));
			msg_freehdr(msghdr);
			cv_broadcast(&msq->msq_cv);
			goto unlock;
		}
		user_msgp += tlen;
		next = msgmaps[next].next;
	}

	/*
	 * Done, return the actual number of bytes copied out.
	 */

	msg_freehdr(msghdr);
	cv_broadcast(&msq->msq_cv);
	*retval = msgsz;

unlock:
	mutex_exit(&msgmutex);
	return error;
}

/*
 * Sysctl initialization and nodes.
 */

static int
sysctl_ipc_msgmni(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = msginfo.msgmni;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	sysctl_unlock();
	error = msgrealloc(newsize, msginfo.msgseg);
	sysctl_relock();
	return error;
}

static int
sysctl_ipc_msgseg(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = msginfo.msgseg;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	sysctl_unlock();
	error = msgrealloc(msginfo.msgmni, newsize);
	sysctl_relock();
	return error;
}

SYSCTL_SETUP(sysctl_ipc_msg_setup, "sysctl kern.ipc subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "ipc",
		SYSCTL_DESCR("SysV IPC options"),
		NULL, 0, NULL, 0,
		CTL_KERN, KERN_SYSVIPC, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "msgmni",
		SYSCTL_DESCR("Max number of message queue identifiers"),
		sysctl_ipc_msgmni, 0, &msginfo.msgmni, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "msgseg",
		SYSCTL_DESCR("Max number of message segments"),
		sysctl_ipc_msgseg, 0, &msginfo.msgseg, 0,
		CTL_CREATE, CTL_EOL);
}
