1/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/
2/*-
3 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Adam Glass and Charles
16 *	Hannum.
17 * 4. The names of the authors may not be used to endorse or promote products
18 *    derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31/*-
32 * Copyright (c) 2003-2005 McAfee, Inc.
33 * All rights reserved.
34 *
35 * This software was developed for the FreeBSD Project in part by McAfee
36 * Research, the Security Research Division of McAfee, Inc under DARPA/SPAWAR
37 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS research
38 * program.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 *    notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 *    notice, this list of conditions and the following disclaimer in the
47 *    documentation and/or other materials provided with the distribution.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 */
61
62#include <sys/cdefs.h>
63__FBSDID("$FreeBSD: stable/11/sys/kern/sysv_shm.c 367601 2020-11-11 22:00:30Z brooks $");
64
65#include "opt_compat.h"
66#include "opt_sysvipc.h"
67
68#include <sys/param.h>
69#include <sys/systm.h>
70#include <sys/abi_compat.h>
71#include <sys/kernel.h>
72#include <sys/limits.h>
73#include <sys/lock.h>
74#include <sys/sysctl.h>
75#include <sys/shm.h>
76#include <sys/proc.h>
77#include <sys/malloc.h>
78#include <sys/mman.h>
79#include <sys/module.h>
80#include <sys/mutex.h>
81#include <sys/racct.h>
82#include <sys/resourcevar.h>
83#include <sys/rwlock.h>
84#include <sys/stat.h>
85#include <sys/syscall.h>
86#include <sys/syscallsubr.h>
87#include <sys/sysent.h>
88#include <sys/sysproto.h>
89#include <sys/jail.h>
90
91#include <security/mac/mac_framework.h>
92
93#include <vm/vm.h>
94#include <vm/vm_param.h>
95#include <vm/pmap.h>
96#include <vm/vm_object.h>
97#include <vm/vm_map.h>
98#include <vm/vm_page.h>
99#include <vm/vm_pager.h>
100
101FEATURE(sysv_shm, "System V shared memory segments support");
102
103static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");
104
105#define	SHMSEG_FREE     	0x0200
106#define	SHMSEG_REMOVED  	0x0400
107#define	SHMSEG_ALLOCATED	0x0800
108
109static int shm_last_free, shm_nused, shmalloced;
110vm_size_t shm_committed;
111static struct shmid_kernel *shmsegs;
112static unsigned shm_prison_slot;
113
114struct shmmap_state {
115	vm_offset_t va;
116	int shmid;
117};
118
119static void shm_deallocate_segment(struct shmid_kernel *);
120static int shm_find_segment_by_key(struct prison *, key_t);
121static struct shmid_kernel *shm_find_segment(struct prison *, int, bool);
122static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
123static int shmget_allocate_segment(struct thread *td, key_t key, size_t size,
124    int mode);
125static int shmget_existing(struct thread *td, size_t size, int shmflg,
126    int mode, int segnum);
127static void shmrealloc(void);
128static int shminit(void);
129static int sysvshm_modload(struct module *, int, void *);
130static int shmunload(void);
131static void shmexit_myhook(struct vmspace *vm);
132static void shmfork_myhook(struct proc *p1, struct proc *p2);
133static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);
134static void shm_remove(struct shmid_kernel *, int);
135static struct prison *shm_find_prison(struct ucred *);
136static int shm_prison_cansee(struct prison *, struct shmid_kernel *);
137static int shm_prison_check(void *, void *);
138static int shm_prison_set(void *, void *);
139static int shm_prison_get(void *, void *);
140static int shm_prison_remove(void *, void *);
141static void shm_prison_cleanup(struct prison *);
142
143/*
144 * Tuneable values.
145 */
146#ifndef SHMMAXPGS
147#define	SHMMAXPGS	131072	/* Note: sysv shared memory is swap backed. */
148#endif
149#ifndef SHMMAX
150#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
151#endif
152#ifndef SHMMIN
153#define	SHMMIN	1
154#endif
155#ifndef SHMMNI
156#define	SHMMNI	192
157#endif
158#ifndef SHMSEG
159#define	SHMSEG	128
160#endif
161#ifndef SHMALL
162#define	SHMALL	(SHMMAXPGS)
163#endif
164
165struct	shminfo shminfo = {
166	.shmmax = SHMMAX,
167	.shmmin = SHMMIN,
168	.shmmni = SHMMNI,
169	.shmseg = SHMSEG,
170	.shmall = SHMALL
171};
172
173static int shm_use_phys;
174static int shm_allow_removed = 1;
175
176SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RWTUN, &shminfo.shmmax, 0,
177    "Maximum shared memory segment size");
178SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RWTUN, &shminfo.shmmin, 0,
179    "Minimum shared memory segment size");
180SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0,
181    "Number of shared memory identifiers");
182SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0,
183    "Number of segments per process");
184SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RWTUN, &shminfo.shmall, 0,
185    "Maximum number of pages available for shared memory");
186SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RWTUN,
187    &shm_use_phys, 0, "Enable/Disable locking of shared memory pages in core");
188SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RWTUN,
189    &shm_allow_removed, 0,
190    "Enable/Disable attachment to attached segments marked for removal");
191SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLTYPE_OPAQUE | CTLFLAG_RD |
192    CTLFLAG_MPSAFE, NULL, 0, sysctl_shmsegs, "",
193    "Array of struct shmid_kernel for each potential shared memory segment");
194
195static struct sx sysvshmsx;
196#define	SYSVSHM_LOCK()		sx_xlock(&sysvshmsx)
197#define	SYSVSHM_UNLOCK()	sx_xunlock(&sysvshmsx)
198#define	SYSVSHM_ASSERT_LOCKED()	sx_assert(&sysvshmsx, SA_XLOCKED)
199
200static int
201shm_find_segment_by_key(struct prison *pr, key_t key)
202{
203	int i;
204
205	for (i = 0; i < shmalloced; i++)
206		if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) &&
207		    shmsegs[i].cred != NULL &&
208		    shmsegs[i].cred->cr_prison == pr &&
209		    shmsegs[i].u.shm_perm.key == key)
210			return (i);
211	return (-1);
212}
213
214/*
215 * Finds segment either by shmid if is_shmid is true, or by segnum if
216 * is_shmid is false.
217 */
218static struct shmid_kernel *
219shm_find_segment(struct prison *rpr, int arg, bool is_shmid)
220{
221	struct shmid_kernel *shmseg;
222	int segnum;
223
224	segnum = is_shmid ? IPCID_TO_IX(arg) : arg;
225	if (segnum < 0 || segnum >= shmalloced)
226		return (NULL);
227	shmseg = &shmsegs[segnum];
228	if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
229	    (!shm_allow_removed &&
230	    (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0) ||
231	    (is_shmid && shmseg->u.shm_perm.seq != IPCID_TO_SEQ(arg)) ||
232	    shm_prison_cansee(rpr, shmseg) != 0)
233		return (NULL);
234	return (shmseg);
235}
236
/*
 * Destroy an unattached segment: drop the backing VM object, update the
 * global page/segment accounting, undo the racct charges taken at
 * creation time and release the creator's credential.  The slot is
 * marked SHMSEG_FREE for reuse.  Caller holds the sysvshm lock.
 */
static void
shm_deallocate_segment(struct shmid_kernel *shmseg)
{
	vm_size_t size;

	SYSVSHM_ASSERT_LOCKED();

	vm_object_deallocate(shmseg->object);
	shmseg->object = NULL;
	size = round_page(shmseg->u.shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->u.shm_perm.mode = SHMSEG_FREE;
#ifdef MAC
	mac_sysvshm_cleanup(shmseg);
#endif
	racct_sub_cred(shmseg->cred, RACCT_NSHM, 1);
	racct_sub_cred(shmseg->cred, RACCT_SHMSIZE, size);
	crfree(shmseg->cred);
	shmseg->cred = NULL;
}
258
/*
 * Unmap one attachment from the given address space and update the
 * segment bookkeeping.  If this was the last attachment of a segment
 * already flagged SHMSEG_REMOVED, the segment itself is destroyed.
 * Returns 0 on success or EINVAL if the VM unmap fails.
 */
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_kernel *shmseg;
	int segnum, result;
	vm_size_t size;

	SYSVSHM_ASSERT_LOCKED();
	segnum = IPCID_TO_IX(shmmap_s->shmid);
	KASSERT(segnum >= 0 && segnum < shmalloced,
	    ("segnum %d shmalloced %d", segnum, shmalloced));

	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->u.shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return (EINVAL);
	/* Free the per-process slot and stamp the detach time. */
	shmmap_s->shmid = -1;
	shmseg->u.shm_dtime = time_second;
	/* Last detach of a removed segment triggers destruction. */
	if ((--shmseg->u.shm_nattch <= 0) &&
	    (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return (0);
}
285
286static void
287shm_remove(struct shmid_kernel *shmseg, int segnum)
288{
289
290	shmseg->u.shm_perm.key = IPC_PRIVATE;
291	shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
292	if (shmseg->u.shm_nattch <= 0) {
293		shm_deallocate_segment(shmseg);
294		shm_last_free = segnum;
295	}
296}
297
298static struct prison *
299shm_find_prison(struct ucred *cred)
300{
301	struct prison *pr, *rpr;
302
303	pr = cred->cr_prison;
304	prison_lock(pr);
305	rpr = osd_jail_get(pr, shm_prison_slot);
306	prison_unlock(pr);
307	return rpr;
308}
309
310static int
311shm_prison_cansee(struct prison *rpr, struct shmid_kernel *shmseg)
312{
313
314	if (shmseg->cred == NULL ||
315	    !(rpr == shmseg->cred->cr_prison ||
316	      prison_ischild(rpr, shmseg->cred->cr_prison)))
317		return (EINVAL);
318	return (0);
319}
320
/*
 * Detach the segment mapped at shmaddr from the calling process.
 * Caller holds the sysvshm lock.  Returns ENOSYS when SysV IPC is
 * unavailable in the caller's jail, EINVAL when shmaddr does not name
 * a current attachment.
 */
static int
kern_shmdt_locked(struct thread *td, const void *shmaddr)
{
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
#ifdef MAC
	struct shmid_kernel *shmsegptr;
	int error;
#endif
	int i;

	SYSVSHM_ASSERT_LOCKED();
	if (shm_find_prison(td->td_ucred) == NULL)
		return (ENOSYS);
	shmmap_s = p->p_vmspace->vm_shm;
 	if (shmmap_s == NULL)
		return (EINVAL);
	/* Find the attachment record whose address matches exactly. */
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)shmaddr) {
			break;
		}
	}
	if (i == shminfo.shmseg)
		return (EINVAL);
#ifdef MAC
	shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
	error = mac_sysvshm_check_shmdt(td->td_ucred, shmsegptr);
	if (error != 0)
		return (error);
#endif
	return (shm_delete_mapping(p->p_vmspace, shmmap_s));
}
354
355#ifndef _SYS_SYSPROTO_H_
356struct shmdt_args {
357	const void *shmaddr;
358};
359#endif
360int
361sys_shmdt(struct thread *td, struct shmdt_args *uap)
362{
363	int error;
364
365	SYSVSHM_LOCK();
366	error = kern_shmdt_locked(td, uap->shmaddr);
367	SYSVSHM_UNLOCK();
368	return (error);
369}
370
/*
 * Map the segment named by shmid into the calling process, at shmaddr
 * if given (honoring SHM_RND/SHM_REMAP) or at a kernel-chosen address
 * otherwise.  Caller holds the sysvshm lock.  On success the attach
 * address is returned in td->td_retval[0].
 */
static int
kern_shmat_locked(struct thread *td, int shmid, const void *shmaddr,
    int shmflg)
{
	struct prison *rpr;
	struct proc *p = td->td_proc;
	struct shmid_kernel *shmseg;
	struct shmmap_state *shmmap_s;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int cow, error, find_space, i, rv;

	SYSVSHM_ASSERT_LOCKED();
	rpr = shm_find_prison(td->td_ucred);
	if (rpr == NULL)
		return (ENOSYS);
	/* Lazily allocate the per-process attachment table. */
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		shmmap_s = malloc(shminfo.shmseg * sizeof(struct shmmap_state),
		    M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		KASSERT(p->p_vmspace->vm_shm == NULL, ("raced"));
		p->p_vmspace->vm_shm = shmmap_s;
	}
	shmseg = shm_find_segment(rpr, shmid, true);
	if (shmseg == NULL)
		return (EINVAL);
	/* Read access is always required; write unless SHM_RDONLY. */
	error = ipcperm(td, &shmseg->u.shm_perm,
	    (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error != 0)
		return (error);
#ifdef MAC
	error = mac_sysvshm_check_shmat(td->td_ucred, shmseg, shmflg);
	if (error != 0)
		return (error);
#endif
	/* Find a free slot in the attachment table. */
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return (EMFILE);
	size = round_page(shmseg->u.shm_segsz);
	prot = VM_PROT_READ;
	cow = MAP_INHERIT_SHARE | MAP_PREFAULT_PARTIAL;
	if ((shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	if (shmaddr != NULL) {
		/* Caller-supplied address: must be SHMLBA-aligned,
		 * or rounded down when SHM_RND is given. */
		if ((shmflg & SHM_RND) != 0)
			attach_va = rounddown2((vm_offset_t)shmaddr, SHMLBA);
		else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)shmaddr;
		else
			return (EINVAL);
		if ((shmflg & SHM_REMAP) != 0)
			cow |= MAP_REMAP;
		find_space = VMFS_NO_SPACE;
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to
		 * put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(td, RLIMIT_DATA));
		find_space = VMFS_OPTIMAL_SPACE;
	}

	/* The map entry takes its own reference on the object. */
	vm_object_reference(shmseg->object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->object, 0, &attach_va,
	    size, 0, find_space, prot, prot, cow);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shmseg->object);
		return (ENOMEM);
	}

	/* Record the attachment and update segment statistics. */
	shmmap_s->va = attach_va;
	shmmap_s->shmid = shmid;
	shmseg->u.shm_lpid = p->p_pid;
	shmseg->u.shm_atime = time_second;
	shmseg->u.shm_nattch++;
	td->td_retval[0] = attach_va;
	return (error);
}
457
/*
 * Locked wrapper around kern_shmat_locked().
 */
int
kern_shmat(struct thread *td, int shmid, const void *shmaddr, int shmflg)
{
	int rv;

	SYSVSHM_LOCK();
	rv = kern_shmat_locked(td, shmid, shmaddr, shmflg);
	SYSVSHM_UNLOCK();
	return (rv);
}
468
469#ifndef _SYS_SYSPROTO_H_
470struct shmat_args {
471	int shmid;
472	const void *shmaddr;
473	int shmflg;
474};
475#endif
476int
477sys_shmat(struct thread *td, struct shmat_args *uap)
478{
479
480	return (kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg));
481}
482
/*
 * Implementation of shmctl(2), including the Linux-ABI commands
 * IPC_INFO, SHM_INFO and SHM_STAT.  Caller holds the sysvshm lock.
 * For commands that return data, *bufsz (when non-NULL) is set to the
 * number of valid bytes in buf for the caller to copy out.
 */
static int
kern_shmctl_locked(struct thread *td, int shmid, int cmd, void *buf,
    size_t *bufsz)
{
	struct prison *rpr;
	struct shmid_kernel *shmseg;
	struct shmid_ds *shmidp;
	struct shm_info shm_info;
	int error;

	SYSVSHM_ASSERT_LOCKED();

	rpr = shm_find_prison(td->td_ucred);
	if (rpr == NULL)
		return (ENOSYS);

	switch (cmd) {
	/*
	 * It is possible that kern_shmctl is being called from the Linux ABI
	 * layer, in which case, we will need to implement IPC_INFO.  It should
	 * be noted that other shmctl calls will be funneled through here for
	 * Linux binaries as well.
	 *
	 * NB: The Linux ABI layer will convert this data to structure(s) more
	 * consistent with the Linux ABI.
	 */
	case IPC_INFO:
		memcpy(buf, &shminfo, sizeof(shminfo));
		if (bufsz)
			*bufsz = sizeof(shminfo);
		td->td_retval[0] = shmalloced;
		return (0);
	case SHM_INFO: {
		shm_info.used_ids = shm_nused;
		shm_info.shm_rss = 0;	/*XXX where to get from ? */
		shm_info.shm_tot = 0;	/*XXX where to get from ? */
		shm_info.shm_swp = 0;	/*XXX where to get from ? */
		shm_info.swap_attempts = 0;	/*XXX where to get from ? */
		shm_info.swap_successes = 0;	/*XXX where to get from ? */
		memcpy(buf, &shm_info, sizeof(shm_info));
		if (bufsz != NULL)
			*bufsz = sizeof(shm_info);
		td->td_retval[0] = shmalloced;
		return (0);
	}
	}
	/* For SHM_STAT, shmid is a raw index rather than an IPC id. */
	shmseg = shm_find_segment(rpr, shmid, cmd != SHM_STAT);
	if (shmseg == NULL)
		return (EINVAL);
#ifdef MAC
	error = mac_sysvshm_check_shmctl(td->td_ucred, shmseg, cmd);
	if (error != 0)
		return (error);
#endif
	switch (cmd) {
	case SHM_STAT:
	case IPC_STAT:
		shmidp = (struct shmid_ds *)buf;
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
		if (error != 0)
			return (error);
		memcpy(shmidp, &shmseg->u, sizeof(struct shmid_ds));
		/* Hide keys from observers in other prisons. */
		if (td->td_ucred->cr_prison != shmseg->cred->cr_prison)
			shmidp->shm_perm.key = IPC_PRIVATE;
		if (bufsz != NULL)
			*bufsz = sizeof(struct shmid_ds);
		if (cmd == SHM_STAT) {
			td->td_retval[0] = IXSEQ_TO_IPCID(shmid,
			    shmseg->u.shm_perm);
		}
		break;
	case IPC_SET:
		shmidp = (struct shmid_ds *)buf;
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
		if (error != 0)
			return (error);
		/* Only owner, group and the permission bits may change. */
		shmseg->u.shm_perm.uid = shmidp->shm_perm.uid;
		shmseg->u.shm_perm.gid = shmidp->shm_perm.gid;
		shmseg->u.shm_perm.mode =
		    (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
		    (shmidp->shm_perm.mode & ACCESSPERMS);
		shmseg->u.shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
		if (error != 0)
			return (error);
		shm_remove(shmseg, IPCID_TO_IX(shmid));
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
582
583int
584kern_shmctl(struct thread *td, int shmid, int cmd, void *buf, size_t *bufsz)
585{
586	int error;
587
588	SYSVSHM_LOCK();
589	error = kern_shmctl_locked(td, shmid, cmd, buf, bufsz);
590	SYSVSHM_UNLOCK();
591	return (error);
592}
593
594
595#ifndef _SYS_SYSPROTO_H_
596struct shmctl_args {
597	int shmid;
598	int cmd;
599	struct shmid_ds *buf;
600};
601#endif
602int
603sys_shmctl(struct thread *td, struct shmctl_args *uap)
604{
605	int error;
606	struct shmid_ds buf;
607	size_t bufsz;
608
609	/*
610	 * The only reason IPC_INFO, SHM_INFO, SHM_STAT exists is to support
611	 * Linux binaries.  If we see the call come through the FreeBSD ABI,
612	 * return an error back to the user since we do not to support this.
613	 */
614	if (uap->cmd == IPC_INFO || uap->cmd == SHM_INFO ||
615	    uap->cmd == SHM_STAT)
616		return (EINVAL);
617
618	/* IPC_SET needs to copyin the buffer before calling kern_shmctl */
619	if (uap->cmd == IPC_SET) {
620		if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
621			goto done;
622	}
623
624	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
625	if (error)
626		goto done;
627
628	/* Cases in which we need to copyout */
629	switch (uap->cmd) {
630	case IPC_STAT:
631		error = copyout(&buf, uap->buf, bufsz);
632		break;
633	}
634
635done:
636	if (error) {
637		/* Invalidate the return value */
638		td->td_retval[0] = -1;
639	}
640	return (error);
641}
642
643
/*
 * shmget(2) path for a key that already names a segment: enforce
 * IPC_CREAT|IPC_EXCL semantics and the size constraint, then return
 * the existing IPC id in td->td_retval[0].  Caller holds the sysvshm
 * lock.  NOTE(review): the "mode" parameter is currently unused here.
 */
static int
shmget_existing(struct thread *td, size_t size, int shmflg, int mode,
    int segnum)
{
	struct shmid_kernel *shmseg;
#ifdef MAC
	int error;
#endif

	SYSVSHM_ASSERT_LOCKED();
	KASSERT(segnum >= 0 && segnum < shmalloced,
	    ("segnum %d shmalloced %d", segnum, shmalloced));
	shmseg = &shmsegs[segnum];
	/* Exclusive creation of an existing key must fail. */
	if ((shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
#ifdef MAC
	error = mac_sysvshm_check_shmget(td->td_ucred, shmseg, shmflg);
	if (error != 0)
		return (error);
#endif
	/* The requested size may not exceed the existing segment. */
	if (size != 0 && size > shmseg->u.shm_segsz)
		return (EINVAL);
	td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
	return (0);
}
669
/*
 * Create a brand new segment for shmget(2): validate size against the
 * tunables, find (or grow to) a free slot, charge the racct limits,
 * allocate the backing VM object and initialize the ipc_perm data.
 * On success the new IPC id is returned in td->td_retval[0].
 * Caller holds the sysvshm lock.
 */
static int
shmget_allocate_segment(struct thread *td, key_t key, size_t size, int mode)
{
	struct ucred *cred = td->td_ucred;
	struct shmid_kernel *shmseg;
	vm_object_t shm_object;
	int i, segnum;

	SYSVSHM_ASSERT_LOCKED();

	if (size < shminfo.shmmin || size > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
		return (ENOSPC);
	size = round_page(size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return (ENOMEM);
	/* Pick a free slot, using the cached hint when available. */
	if (shm_last_free < 0) {
		shmrealloc();	/* Maybe expand the shmsegs[] array. */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return (ENOSPC);
		segnum = i;
	} else  {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	KASSERT(segnum >= 0 && segnum < shmalloced,
	    ("segnum %d shmalloced %d", segnum, shmalloced));
	shmseg = &shmsegs[segnum];
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(td->td_proc);
		if (racct_add(td->td_proc, RACCT_NSHM, 1)) {
			PROC_UNLOCK(td->td_proc);
			return (ENOSPC);
		}
		/* Roll back the NSHM charge if SHMSIZE is refused. */
		if (racct_add(td->td_proc, RACCT_SHMSIZE, size)) {
			racct_sub(td->td_proc, RACCT_NSHM, 1);
			PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		PROC_UNLOCK(td->td_proc);
	}
#endif

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	shm_object = vm_pager_allocate(shm_use_phys ? OBJT_PHYS : OBJT_SWAP,
	    0, size, VM_PROT_DEFAULT, 0, cred);
	if (shm_object == NULL) {
#ifdef RACCT
		/* Undo the charges taken above. */
		if (racct_enable) {
			PROC_LOCK(td->td_proc);
			racct_sub(td->td_proc, RACCT_NSHM, 1);
			racct_sub(td->td_proc, RACCT_SHMSIZE, size);
			PROC_UNLOCK(td->td_proc);
		}
#endif
		return (ENOMEM);
	}
	shm_object->pg_color = 0;
	VM_OBJECT_WLOCK(shm_object);
	vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_object, OBJ_COLORED | OBJ_NOSPLIT);
	VM_OBJECT_WUNLOCK(shm_object);

	/* Fill in the new segment's identity and statistics. */
	shmseg->object = shm_object;
	shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid;
	shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = cred->cr_gid;
	shmseg->u.shm_perm.mode = (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->u.shm_perm.key = key;
	/* Bump the sequence number so stale shmids are detected. */
	shmseg->u.shm_perm.seq = (shmseg->u.shm_perm.seq + 1) & 0x7fff;
	shmseg->cred = crhold(cred);
	shmseg->u.shm_segsz = size;
	shmseg->u.shm_cpid = td->td_proc->p_pid;
	shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
	shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;
#ifdef MAC
	mac_sysvshm_create(cred, shmseg);
#endif
	shmseg->u.shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);

	return (0);
}
762
763#ifndef _SYS_SYSPROTO_H_
764struct shmget_args {
765	key_t key;
766	size_t size;
767	int shmflg;
768};
769#endif
770int
771sys_shmget(struct thread *td, struct shmget_args *uap)
772{
773	int segnum, mode;
774	int error;
775
776	if (shm_find_prison(td->td_ucred) == NULL)
777		return (ENOSYS);
778	mode = uap->shmflg & ACCESSPERMS;
779	SYSVSHM_LOCK();
780	if (uap->key == IPC_PRIVATE) {
781		error = shmget_allocate_segment(td, uap->key, uap->size, mode);
782	} else {
783		segnum = shm_find_segment_by_key(td->td_ucred->cr_prison,
784		    uap->key);
785		if (segnum >= 0)
786			error = shmget_existing(td, uap->size, uap->shmflg,
787			    mode, segnum);
788		else if ((uap->shmflg & IPC_CREAT) == 0)
789			error = ENOENT;
790		else
791			error = shmget_allocate_segment(td, uap->key,
792			    uap->size, mode);
793	}
794	SYSVSHM_UNLOCK();
795	return (error);
796}
797
/*
 * Fork hook: the child inherits a copy of the parent's attachment
 * table, so bump shm_nattch for every segment the parent has mapped.
 * NOTE(review): assumes p1->p_vmspace->vm_shm is non-NULL — presumably
 * guaranteed by the caller of the shmfork hook; confirm at call site.
 */
static void
shmfork_myhook(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	SYSVSHM_LOCK();
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
	p2->p_vmspace->vm_shm = shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1) {
			KASSERT(IPCID_TO_IX(shmmap_s->shmid) >= 0 &&
			    IPCID_TO_IX(shmmap_s->shmid) < shmalloced,
			    ("segnum %d shmalloced %d",
			    IPCID_TO_IX(shmmap_s->shmid), shmalloced));
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
		}
	}
	SYSVSHM_UNLOCK();
}
821
822static void
823shmexit_myhook(struct vmspace *vm)
824{
825	struct shmmap_state *base, *shm;
826	int i;
827
828	base = vm->vm_shm;
829	if (base != NULL) {
830		vm->vm_shm = NULL;
831		SYSVSHM_LOCK();
832		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
833			if (shm->shmid != -1)
834				shm_delete_mapping(vm, shm);
835		}
836		SYSVSHM_UNLOCK();
837		free(base, M_SHM);
838	}
839}
840
841static void
842shmrealloc(void)
843{
844	struct shmid_kernel *newsegs;
845	int i;
846
847	SYSVSHM_ASSERT_LOCKED();
848
849	if (shmalloced >= shminfo.shmmni)
850		return;
851
852	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM,
853	    M_WAITOK | M_ZERO);
854	for (i = 0; i < shmalloced; i++)
855		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
856	for (; i < shminfo.shmmni; i++) {
857		newsegs[i].u.shm_perm.mode = SHMSEG_FREE;
858		newsegs[i].u.shm_perm.seq = 0;
859#ifdef MAC
860		mac_sysvshm_init(&newsegs[i]);
861#endif
862	}
863	free(shmsegs, M_SHM);
864	shmsegs = newsegs;
865	shmalloced = shminfo.shmmni;
866}
867
/* Native (and legacy-compat) system calls registered by this module. */
static struct syscall_helper_data shm_syscalls[] = {
	SYSCALL_INIT_HELPER(shmat),
	SYSCALL_INIT_HELPER(shmctl),
	SYSCALL_INIT_HELPER(shmdt),
	SYSCALL_INIT_HELPER(shmget),
#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
	SYSCALL_INIT_HELPER_COMPAT(freebsd7_shmctl),
#endif
#if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
	SYSCALL_INIT_HELPER(shmsys),
#endif
	SYSCALL_INIT_LAST
};
882
883#ifdef COMPAT_FREEBSD32
884#include <compat/freebsd32/freebsd32.h>
885#include <compat/freebsd32/freebsd32_ipc.h>
886#include <compat/freebsd32/freebsd32_proto.h>
887#include <compat/freebsd32/freebsd32_signal.h>
888#include <compat/freebsd32/freebsd32_syscall.h>
889#include <compat/freebsd32/freebsd32_util.h>
890
/* 32-bit compatibility system calls (COMPAT_FREEBSD32). */
static struct syscall_helper_data shm32_syscalls[] = {
	SYSCALL32_INIT_HELPER_COMPAT(shmat),
	SYSCALL32_INIT_HELPER_COMPAT(shmdt),
	SYSCALL32_INIT_HELPER_COMPAT(shmget),
	SYSCALL32_INIT_HELPER(freebsd32_shmsys),
	SYSCALL32_INIT_HELPER(freebsd32_shmctl),
#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
	SYSCALL32_INIT_HELPER(freebsd7_freebsd32_shmctl),
#endif
	SYSCALL_INIT_LAST
};
903#endif
904
/*
 * Module initialization: compute derived tunables, allocate the
 * segment array, install the exit/fork hooks, register the jail OSD
 * slot (enabling SysV shm in prison0 and every jail that currently
 * allows sysvipc) and finally register the system calls.
 */
static int
shminit(void)
{
	struct prison *pr;
	void **rsv;
	int i, error;
	osd_method_t methods[PR_MAXMETHOD] = {
	    [PR_METHOD_CHECK] =		shm_prison_check,
	    [PR_METHOD_SET] =		shm_prison_set,
	    [PR_METHOD_GET] =		shm_prison_get,
	    [PR_METHOD_REMOVE] =	shm_prison_remove,
	};

#ifndef BURN_BRIDGES
	/* Accept the historical tunable name as an alias for shmall. */
	if (TUNABLE_ULONG_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall) != 0)
		printf("kern.ipc.shmmaxpgs is now called kern.ipc.shmall!\n");
#endif
	if (shminfo.shmmax == SHMMAX) {
		/* Initialize shmmax dealing with possible overflow. */
		for (i = PAGE_SIZE; i != 0; i--) {
			shminfo.shmmax = shminfo.shmall * i;
			if ((shminfo.shmmax / shminfo.shmall) == (u_long)i)
				break;
		}
	}
	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM,
	    M_WAITOK|M_ZERO);
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].u.shm_perm.seq = 0;
#ifdef MAC
		mac_sysvshm_init(&shmsegs[i]);
#endif
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	sx_init(&sysvshmsx, "sysvshmsx");
	shmexit_hook = &shmexit_myhook;
	shmfork_hook = &shmfork_myhook;

	/* Set current prisons according to their allow.sysvipc. */
	shm_prison_slot = osd_jail_register(NULL, methods);
	rsv = osd_reserve(shm_prison_slot);
	prison_lock(&prison0);
	(void)osd_jail_set_reserved(&prison0, shm_prison_slot, rsv, &prison0);
	prison_unlock(&prison0);
	rsv = NULL;
	sx_slock(&allprison_lock);
	TAILQ_FOREACH(pr, &allprison, pr_list) {
		/* Reserve outside the prison lock; it may go unused. */
		if (rsv == NULL)
			rsv = osd_reserve(shm_prison_slot);
		prison_lock(pr);
		if ((pr->pr_allow & PR_ALLOW_SYSVIPC) && pr->pr_ref > 0) {
			(void)osd_jail_set_reserved(pr, shm_prison_slot, rsv,
			    &prison0);
			rsv = NULL;
		}
		prison_unlock(pr);
	}
	if (rsv != NULL)
		osd_free_reserved(rsv);
	sx_sunlock(&allprison_lock);

	error = syscall_helper_register(shm_syscalls, SY_THR_STATIC_KLD);
	if (error != 0)
		return (error);
#ifdef COMPAT_FREEBSD32
	error = syscall32_helper_register(shm32_syscalls, SY_THR_STATIC_KLD);
	if (error != 0)
		return (error);
#endif
	return (0);
}
980
/*
 * Module teardown: refuse while any segment is allocated, otherwise
 * unregister the syscalls and jail OSD slot, drop our references on
 * remaining VM objects and free the segment array.
 */
static int
shmunload(void)
{
	int i;

	if (shm_nused > 0)
		return (EBUSY);

#ifdef COMPAT_FREEBSD32
	syscall32_helper_unregister(shm32_syscalls);
#endif
	syscall_helper_unregister(shm_syscalls);
	if (shm_prison_slot != 0)
		osd_jail_deregister(shm_prison_slot);

	for (i = 0; i < shmalloced; i++) {
#ifdef MAC
		mac_sysvshm_destroy(&shmsegs[i]);
#endif
		/*
		 * Objects might be still mapped into the processes
		 * address spaces.  Actual free would happen on the
		 * last mapping destruction.
		 */
		if (shmsegs[i].u.shm_perm.mode != SHMSEG_FREE)
			vm_object_deallocate(shmsegs[i].object);
	}
	free(shmsegs, M_SHM);
	shmexit_hook = NULL;
	shmfork_hook = NULL;
	sx_destroy(&sysvshmsx);
	return (0);
}
1014
/*
 * Sysctl handler exporting a copy of shmsegs[].  Segments the caller's
 * jail may not see are exported as free slots, keys belonging to other
 * prisons are hidden as IPC_PRIVATE, and kernel pointers are scrubbed.
 * 32-bit requesters receive the ABI-converted layout.
 */
static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{
	struct shmid_kernel tshmseg;
#ifdef COMPAT_FREEBSD32
	struct shmid_kernel32 tshmseg32;
#endif
	struct prison *pr, *rpr;
	void *outaddr;
	size_t outsize;
	int error, i;

	SYSVSHM_LOCK();
	pr = req->td->td_ucred->cr_prison;
	rpr = shm_find_prison(req->td->td_ucred);
	error = 0;
	for (i = 0; i < shmalloced; i++) {
		if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
		    rpr == NULL || shm_prison_cansee(rpr, &shmsegs[i]) != 0) {
			/* Invisible segments are reported as free slots. */
			bzero(&tshmseg, sizeof(tshmseg));
			tshmseg.u.shm_perm.mode = SHMSEG_FREE;
		} else {
			tshmseg = shmsegs[i];
			if (tshmseg.cred->cr_prison != pr)
				tshmseg.u.shm_perm.key = IPC_PRIVATE;
		}
#ifdef COMPAT_FREEBSD32
		if (SV_CURPROC_FLAG(SV_ILP32)) {
			bzero(&tshmseg32, sizeof(tshmseg32));
			freebsd32_ipcperm_out(&tshmseg.u.shm_perm,
			    &tshmseg32.u.shm_perm);
			CP(tshmseg, tshmseg32, u.shm_segsz);
			CP(tshmseg, tshmseg32, u.shm_lpid);
			CP(tshmseg, tshmseg32, u.shm_cpid);
			CP(tshmseg, tshmseg32, u.shm_nattch);
			CP(tshmseg, tshmseg32, u.shm_atime);
			CP(tshmseg, tshmseg32, u.shm_dtime);
			CP(tshmseg, tshmseg32, u.shm_ctime);
			/* Don't copy object, label, or cred */
			outaddr = &tshmseg32;
			outsize = sizeof(tshmseg32);
		} else
#endif
		{
			/* Never leak kernel pointers to userland. */
			tshmseg.object = NULL;
			tshmseg.label = NULL;
			tshmseg.cred = NULL;
			outaddr = &tshmseg;
			outsize = sizeof(tshmseg);
		}
		error = SYSCTL_OUT(req, outaddr, outsize);
		if (error != 0)
			break;
	}
	SYSVSHM_UNLOCK();
	return (error);
}
1072
1073static int
1074shm_prison_check(void *obj, void *data)
1075{
1076	struct prison *pr = obj;
1077	struct prison *prpr;
1078	struct vfsoptlist *opts = data;
1079	int error, jsys;
1080
1081	/*
1082	 * sysvshm is a jailsys integer.
1083	 * It must be "disable" if the parent jail is disabled.
1084	 */
1085	error = vfs_copyopt(opts, "sysvshm", &jsys, sizeof(jsys));
1086	if (error != ENOENT) {
1087		if (error != 0)
1088			return (error);
1089		switch (jsys) {
1090		case JAIL_SYS_DISABLE:
1091			break;
1092		case JAIL_SYS_NEW:
1093		case JAIL_SYS_INHERIT:
1094			prison_lock(pr->pr_parent);
1095			prpr = osd_jail_get(pr->pr_parent, shm_prison_slot);
1096			prison_unlock(pr->pr_parent);
1097			if (prpr == NULL)
1098				return (EPERM);
1099			break;
1100		default:
1101			return (EINVAL);
1102		}
1103	}
1104
1105	return (0);
1106}
1107
/*
 * Jail parameter setter for "sysvshm".  Determines which prison acts as
 * the root for this jail's segments (the jail itself for JAIL_SYS_NEW, the
 * parent's root for JAIL_SYS_INHERIT) or disables the feature entirely,
 * propagating any change to descendant jails that tracked the old setting.
 */
static int
shm_prison_set(void *obj, void *data)
{
	struct prison *pr = obj;
	struct prison *tpr, *orpr, *nrpr, *trpr;
	struct vfsoptlist *opts = data;
	void *rsv;
	int jsys, descend;

	/*
	 * sysvshm controls which jail is the root of the associated segments
	 * (this jail or same as the parent), or if the feature is available
	 * at all.
	 */
	if (vfs_copyopt(opts, "sysvshm", &jsys, sizeof(jsys)) == ENOENT)
		jsys = vfs_flagopt(opts, "allow.sysvipc", NULL, 0)
		    ? JAIL_SYS_INHERIT
		    : vfs_flagopt(opts, "allow.nosysvipc", NULL, 0)
		    ? JAIL_SYS_DISABLE
		    : -1;	/* -1: no option given, leave unchanged */
	if (jsys == JAIL_SYS_DISABLE) {
		prison_lock(pr);
		orpr = osd_jail_get(pr, shm_prison_slot);
		if (orpr != NULL)
			osd_jail_del(pr, shm_prison_slot);
		prison_unlock(pr);
		if (orpr != NULL) {
			/* Segments rooted here are destroyed. */
			if (orpr == pr)
				shm_prison_cleanup(pr);
			/* Disable all child jails as well. */
			FOREACH_PRISON_DESCENDANT(pr, tpr, descend) {
				prison_lock(tpr);
				trpr = osd_jail_get(tpr, shm_prison_slot);
				if (trpr != NULL) {
					osd_jail_del(tpr, shm_prison_slot);
					prison_unlock(tpr);
					if (trpr == tpr)
						shm_prison_cleanup(tpr);
				} else {
					/* Already disabled; skip subtree. */
					prison_unlock(tpr);
					descend = 0;
				}
			}
		}
	} else if (jsys != -1) {
		if (jsys == JAIL_SYS_NEW)
			nrpr = pr;
		else {
			prison_lock(pr->pr_parent);
			nrpr = osd_jail_get(pr->pr_parent, shm_prison_slot);
			prison_unlock(pr->pr_parent);
		}
		/* Reserve OSD storage before taking the prison lock. */
		rsv = osd_reserve(shm_prison_slot);
		prison_lock(pr);
		orpr = osd_jail_get(pr, shm_prison_slot);
		if (orpr != nrpr)
			(void)osd_jail_set_reserved(pr, shm_prison_slot, rsv,
			    nrpr);
		else
			osd_free_reserved(rsv);
		prison_unlock(pr);
		if (orpr != nrpr) {
			if (orpr == pr)
				shm_prison_cleanup(pr);
			if (orpr != NULL) {
				/* Change child jails matching the old root, */
				FOREACH_PRISON_DESCENDANT(pr, tpr, descend) {
					prison_lock(tpr);
					trpr = osd_jail_get(tpr,
					    shm_prison_slot);
					if (trpr == orpr) {
						(void)osd_jail_set(tpr,
						    shm_prison_slot, nrpr);
						prison_unlock(tpr);
						if (trpr == tpr)
							shm_prison_cleanup(tpr);
					} else {
						/* Different root; skip subtree. */
						prison_unlock(tpr);
						descend = 0;
					}
				}
			}
		}
	}

	return (0);
}
1195
1196static int
1197shm_prison_get(void *obj, void *data)
1198{
1199	struct prison *pr = obj;
1200	struct prison *rpr;
1201	struct vfsoptlist *opts = data;
1202	int error, jsys;
1203
1204	/* Set sysvshm based on the jail's root prison. */
1205	prison_lock(pr);
1206	rpr = osd_jail_get(pr, shm_prison_slot);
1207	prison_unlock(pr);
1208	jsys = rpr == NULL ? JAIL_SYS_DISABLE
1209	    : rpr == pr ? JAIL_SYS_NEW : JAIL_SYS_INHERIT;
1210	error = vfs_setopt(opts, "sysvshm", &jsys, sizeof(jsys));
1211	if (error == ENOENT)
1212		error = 0;
1213	return (error);
1214}
1215
/*
 * OSD destructor invoked when a jail is removed.  If the dying jail is the
 * root prison for its segments, destroy every segment that belongs to it.
 */
static int
shm_prison_remove(void *obj, void *data __unused)
{
	struct prison *pr = obj;
	struct prison *rpr;

	SYSVSHM_LOCK();
	prison_lock(pr);
	rpr = osd_jail_get(pr, shm_prison_slot);
	prison_unlock(pr);
	if (rpr == pr)
		shm_prison_cleanup(pr);
	SYSVSHM_UNLOCK();
	return (0);
}
1231
1232static void
1233shm_prison_cleanup(struct prison *pr)
1234{
1235	struct shmid_kernel *shmseg;
1236	int i;
1237
1238	/* Remove any segments that belong to this jail. */
1239	for (i = 0; i < shmalloced; i++) {
1240		shmseg = &shmsegs[i];
1241		if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) &&
1242		    shmseg->cred != NULL && shmseg->cred->cr_prison == pr) {
1243			shm_remove(shmseg, i);
1244		}
1245	}
1246}
1247
/* Register the "sysvshm" jailsys parameter node with the jail framework. */
SYSCTL_JAIL_PARAM_SYS_NODE(sysvshm, CTLFLAG_RW, "SYSV shared memory");
1249
1250#if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
/*
 * Historic (4.3BSD-era) shmid_ds layout, preserved for the old shmctl ABI.
 * Field widths (int segsz, u_short pids, short nattch) must not change.
 */
struct oshmid_ds {
	struct	ipc_perm_old shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	u_short	shm_cpid;		/* pid, creator */
	u_short	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

/* Argument layout for the historic oshmctl() entry point. */
struct oshmctl_args {
	int shmid;		/* segment identifier */
	int cmd;		/* IPC_STAT, or forwarded to freebsd7_shmctl */
	struct oshmid_ds *ubuf;	/* user buffer for IPC_STAT output */
};
1268
/*
 * Historic shmctl().  Handles IPC_STAT against the old oshmid_ds layout
 * itself; every other command is forwarded to freebsd7_shmctl() (the
 * argument structs share a compatible leading layout, hence the cast).
 */
static int
oshmctl(struct thread *td, struct oshmctl_args *uap)
{
#ifdef COMPAT_43
	int error = 0;
	struct prison *rpr;
	struct shmid_kernel *shmseg;
	struct oshmid_ds outbuf;

	rpr = shm_find_prison(td->td_ucred);
	if (rpr == NULL)
		return (ENOSYS);
	if (uap->cmd != IPC_STAT) {
		return (freebsd7_shmctl(td,
		    (struct freebsd7_shmctl_args *)uap));
	}
	SYSVSHM_LOCK();
	shmseg = shm_find_segment(rpr, uap->shmid, true);
	if (shmseg == NULL) {
		SYSVSHM_UNLOCK();
		return (EINVAL);
	}
	/* Caller needs read permission on the segment. */
	error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
	if (error != 0) {
		SYSVSHM_UNLOCK();
		return (error);
	}
#ifdef MAC
	error = mac_sysvshm_check_shmctl(td->td_ucred, shmseg, uap->cmd);
	if (error != 0) {
		SYSVSHM_UNLOCK();
		return (error);
	}
#endif
	/* Convert to the old layout; widths may truncate large values. */
	ipcperm_new2old(&shmseg->u.shm_perm, &outbuf.shm_perm);
	outbuf.shm_segsz = shmseg->u.shm_segsz;
	outbuf.shm_cpid = shmseg->u.shm_cpid;
	outbuf.shm_lpid = shmseg->u.shm_lpid;
	outbuf.shm_nattch = shmseg->u.shm_nattch;
	outbuf.shm_atime = shmseg->u.shm_atime;
	outbuf.shm_dtime = shmseg->u.shm_dtime;
	outbuf.shm_ctime = shmseg->u.shm_ctime;
	/* NOTE(review): exports a kernel pointer to userspace; historic ABI. */
	outbuf.shm_handle = shmseg->object;
	SYSVSHM_UNLOCK();
	/* Copy out after dropping the lock; copyout may fault. */
	return (copyout(&outbuf, uap->ubuf, sizeof(outbuf)));
#else
	return (EINVAL);
#endif
}
1318
/* XXX casting to (sy_call_t *) is bogus, as usual. */
/* Dispatch table for the multiplexed shmsys() call; index is uap->which. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)sys_shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)sys_shmdt, (sy_call_t *)sys_shmget,
	(sy_call_t *)freebsd7_shmctl
};
1325
#ifndef _SYS_SYSPROTO_H_
/* XXX actually varargs. */
struct shmsys_args {
	int	which;	/* index into shmcalls[] */
	int	a2;	/* remaining args passed through to the target */
	int	a3;
	int	a4;
};
#endif
/*
 * Multiplexed System V shm entry point: dispatch to the selected handler,
 * passing the address of the remaining arguments as its argument struct.
 */
int
sys_shmsys(struct thread *td, struct shmsys_args *uap)
{

	if (uap->which < 0 || uap->which >= nitems(shmcalls))
		return (EINVAL);
	return ((*shmcalls[uap->which])(td, &uap->a2));
}
1343
1344#endif	/* i386 && (COMPAT_FREEBSD4 || COMPAT_43) */
1345
1346#ifdef COMPAT_FREEBSD32
1347
/*
 * 32-bit compat version of the multiplexed shmsys() call.  Re-marshals the
 * 32-bit argument words into the native argument structs and dispatches via
 * the sysent table; the old oshmctl (which == 1) is not supported.
 */
int
freebsd32_shmsys(struct thread *td, struct freebsd32_shmsys_args *uap)
{

#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
	switch (uap->which) {
	case 0:	{	/* shmat */
		struct shmat_args ap;

		ap.shmid = uap->a2;
		ap.shmaddr = PTRIN(uap->a3);
		ap.shmflg = uap->a4;
		return (sysent[SYS_shmat].sy_call(td, &ap));
	}
	case 2: {	/* shmdt */
		struct shmdt_args ap;

		ap.shmaddr = PTRIN(uap->a2);
		return (sysent[SYS_shmdt].sy_call(td, &ap));
	}
	case 3: {	/* shmget */
		struct shmget_args ap;

		ap.key = uap->a2;
		ap.size = uap->a3;
		ap.shmflg = uap->a4;
		return (sysent[SYS_shmget].sy_call(td, &ap));
	}
	case 4: {	/* shmctl */
		struct freebsd7_freebsd32_shmctl_args ap;

		ap.shmid = uap->a2;
		ap.cmd = uap->a3;
		ap.buf = PTRIN(uap->a4);
		return (freebsd7_freebsd32_shmctl(td, &ap));
	}
	case 1:		/* oshmctl */
	default:
		return (EINVAL);
	}
#else
	return (nosys(td, NULL));
#endif
}
1393
1394#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
1395    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
/*
 * FreeBSD 4-7 era shmctl() for 32-bit processes.  Converts between the
 * old 32-bit structures (shmid_ds32_old et al.) and the native ones around
 * a call to kern_shmctl().
 */
int
freebsd7_freebsd32_shmctl(struct thread *td,
    struct freebsd7_freebsd32_shmctl_args *uap)
{
	int error;
	union {
		struct shmid_ds shmid_ds;
		struct shm_info shm_info;
		struct shminfo shminfo;
	} u;
	union {
		struct shmid_ds32_old shmid_ds32;
		struct shm_info32 shm_info32;
		struct shminfo32 shminfo32;
	} u32;
	size_t sz;

	/* IPC_SET: copy in and widen the 32-bit shmid_ds first. */
	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &u32.shmid_ds32,
		    sizeof(u32.shmid_ds32))))
			goto done;
		freebsd32_ipcperm_old_in(&u32.shmid_ds32.shm_perm,
		    &u.shmid_ds.shm_perm);
		CP(u32.shmid_ds32, u.shmid_ds, shm_segsz);
		CP(u32.shmid_ds32, u.shmid_ds, shm_lpid);
		CP(u32.shmid_ds32, u.shmid_ds, shm_cpid);
		CP(u32.shmid_ds32, u.shmid_ds, shm_nattch);
		CP(u32.shmid_ds32, u.shmid_ds, shm_atime);
		CP(u32.shmid_ds32, u.shmid_ds, shm_dtime);
		CP(u32.shmid_ds32, u.shmid_ds, shm_ctime);
	}

	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&u, &sz);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
	switch (uap->cmd) {
	case IPC_INFO:
		CP(u.shminfo, u32.shminfo32, shmmax);
		CP(u.shminfo, u32.shminfo32, shmmin);
		CP(u.shminfo, u32.shminfo32, shmmni);
		CP(u.shminfo, u32.shminfo32, shmseg);
		CP(u.shminfo, u32.shminfo32, shmall);
		error = copyout(&u32.shminfo32, uap->buf,
		    sizeof(u32.shminfo32));
		break;
	case SHM_INFO:
		CP(u.shm_info, u32.shm_info32, used_ids);
		CP(u.shm_info, u32.shm_info32, shm_rss);
		CP(u.shm_info, u32.shm_info32, shm_tot);
		CP(u.shm_info, u32.shm_info32, shm_swp);
		CP(u.shm_info, u32.shm_info32, swap_attempts);
		CP(u.shm_info, u32.shm_info32, swap_successes);
		error = copyout(&u32.shm_info32, uap->buf,
		    sizeof(u32.shm_info32));
		break;
	case SHM_STAT:
	case IPC_STAT:
		/* Zero first so padding/unset bytes don't leak to userland. */
		memset(&u32.shmid_ds32, 0, sizeof(u32.shmid_ds32));
		freebsd32_ipcperm_old_out(&u.shmid_ds.shm_perm,
		    &u32.shmid_ds32.shm_perm);
		/* Clamp values that don't fit the narrower 32-bit field. */
		if (u.shmid_ds.shm_segsz > INT32_MAX)
			u32.shmid_ds32.shm_segsz = INT32_MAX;
		else
			CP(u.shmid_ds, u32.shmid_ds32, shm_segsz);
		CP(u.shmid_ds, u32.shmid_ds32, shm_lpid);
		CP(u.shmid_ds, u32.shmid_ds32, shm_cpid);
		CP(u.shmid_ds, u32.shmid_ds32, shm_nattch);
		CP(u.shmid_ds, u32.shmid_ds32, shm_atime);
		CP(u.shmid_ds, u32.shmid_ds32, shm_dtime);
		CP(u.shmid_ds, u32.shmid_ds32, shm_ctime);
		u32.shmid_ds32.shm_internal = 0;
		error = copyout(&u32.shmid_ds32, uap->buf,
		    sizeof(u32.shmid_ds32));
		break;
	}

done:
	if (error) {
		/* Invalidate the return value */
		td->td_retval[0] = -1;
	}
	return (error);
}
1482
1483int
1484freebsd32_shmctl(struct thread *td, struct freebsd32_shmctl_args *uap)
1485{
1486	int error;
1487	union {
1488		struct shmid_ds shmid_ds;
1489		struct shm_info shm_info;
1490		struct shminfo shminfo;
1491	} u;
1492	union {
1493		struct shmid_ds32 shmid_ds32;
1494		struct shm_info32 shm_info32;
1495		struct shminfo32 shminfo32;
1496	} u32;
1497	size_t sz;
1498
1499	if (uap->cmd == IPC_SET) {
1500		if ((error = copyin(uap->buf, &u32.shmid_ds32,
1501		    sizeof(u32.shmid_ds32))))
1502			goto done;
1503		freebsd32_ipcperm_in(&u32.shmid_ds32.shm_perm,
1504		    &u.shmid_ds.shm_perm);
1505		CP(u32.shmid_ds32, u.shmid_ds, shm_segsz);
1506		CP(u32.shmid_ds32, u.shmid_ds, shm_lpid);
1507		CP(u32.shmid_ds32, u.shmid_ds, shm_cpid);
1508		CP(u32.shmid_ds32, u.shmid_ds, shm_nattch);
1509		CP(u32.shmid_ds32, u.shmid_ds, shm_atime);
1510		CP(u32.shmid_ds32, u.shmid_ds, shm_dtime);
1511		CP(u32.shmid_ds32, u.shmid_ds, shm_ctime);
1512	}
1513
1514	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&u, &sz);
1515	if (error)
1516		goto done;
1517
1518	/* Cases in which we need to copyout */
1519	switch (uap->cmd) {
1520	case IPC_INFO:
1521		CP(u.shminfo, u32.shminfo32, shmmax);
1522		CP(u.shminfo, u32.shminfo32, shmmin);
1523		CP(u.shminfo, u32.shminfo32, shmmni);
1524		CP(u.shminfo, u32.shminfo32, shmseg);
1525		CP(u.shminfo, u32.shminfo32, shmall);
1526		error = copyout(&u32.shminfo32, uap->buf,
1527		    sizeof(u32.shminfo32));
1528		break;
1529	case SHM_INFO:
1530		CP(u.shm_info, u32.shm_info32, used_ids);
1531		CP(u.shm_info, u32.shm_info32, shm_rss);
1532		CP(u.shm_info, u32.shm_info32, shm_tot);
1533		CP(u.shm_info, u32.shm_info32, shm_swp);
1534		CP(u.shm_info, u32.shm_info32, swap_attempts);
1535		CP(u.shm_info, u32.shm_info32, swap_successes);
1536		error = copyout(&u32.shm_info32, uap->buf,
1537		    sizeof(u32.shm_info32));
1538		break;
1539	case SHM_STAT:
1540	case IPC_STAT:
1541		freebsd32_ipcperm_out(&u.shmid_ds.shm_perm,
1542		    &u32.shmid_ds32.shm_perm);
1543		if (u.shmid_ds.shm_segsz > INT32_MAX)
1544			u32.shmid_ds32.shm_segsz = INT32_MAX;
1545		else
1546			CP(u.shmid_ds, u32.shmid_ds32, shm_segsz);
1547		CP(u.shmid_ds, u32.shmid_ds32, shm_lpid);
1548		CP(u.shmid_ds, u32.shmid_ds32, shm_cpid);
1549		CP(u.shmid_ds, u32.shmid_ds32, shm_nattch);
1550		CP(u.shmid_ds, u32.shmid_ds32, shm_atime);
1551		CP(u.shmid_ds, u32.shmid_ds32, shm_dtime);
1552		CP(u.shmid_ds, u32.shmid_ds32, shm_ctime);
1553		error = copyout(&u32.shmid_ds32, uap->buf,
1554		    sizeof(u32.shmid_ds32));
1555		break;
1556	}
1557
1558done:
1559	if (error) {
1560		/* Invalidate the return value */
1561		td->td_retval[0] = -1;
1562	}
1563	return (error);
1564}
1565#endif
1566
1567#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
1568    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
1569
#ifndef _SYS_SYSPROTO_H_
/* Argument layout for the FreeBSD 4-7 era shmctl() using shmid_ds_old. */
struct freebsd7_shmctl_args {
	int shmid;		/* segment identifier */
	int cmd;		/* IPC_STAT, IPC_SET, IPC_RMID */
	struct shmid_ds_old *buf;	/* user buffer, old layout */
};
#endif
/*
 * FreeBSD 4-7 era shmctl().  Converts between shmid_ds_old and the native
 * shmid_ds around a call to kern_shmctl(), clamping fields that no longer
 * fit the old structure's narrower types.
 */
int
freebsd7_shmctl(struct thread *td, struct freebsd7_shmctl_args *uap)
{
	int error;
	struct shmid_ds_old old;
	struct shmid_ds buf;
	size_t bufsz;

	/*
	 * The only reason IPC_INFO, SHM_INFO, SHM_STAT exists is to support
	 * Linux binaries.  If we see the call come through the FreeBSD ABI,
	 * return an error back to the user since we do not to support this.
	 */
	if (uap->cmd == IPC_INFO || uap->cmd == SHM_INFO ||
	    uap->cmd == SHM_STAT)
		return (EINVAL);

	/* IPC_SET needs to copyin the buffer before calling kern_shmctl */
	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &old, sizeof(old))))
			goto done;
		ipcperm_old2new(&old.shm_perm, &buf.shm_perm);
		CP(old, buf, shm_segsz);
		CP(old, buf, shm_lpid);
		CP(old, buf, shm_cpid);
		CP(old, buf, shm_nattch);
		CP(old, buf, shm_atime);
		CP(old, buf, shm_dtime);
		CP(old, buf, shm_ctime);
	}

	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
	switch (uap->cmd) {
	case IPC_STAT:
		/* Zero first so padding/unset bytes don't leak to userland. */
		memset(&old, 0, sizeof(old));
		ipcperm_new2old(&buf.shm_perm, &old.shm_perm);
		/* Clamp values that overflow the old structure's fields. */
		if (buf.shm_segsz > INT_MAX)
			old.shm_segsz = INT_MAX;
		else
			CP(buf, old, shm_segsz);
		CP(buf, old, shm_lpid);
		CP(buf, old, shm_cpid);
		if (buf.shm_nattch > SHRT_MAX)
			old.shm_nattch = SHRT_MAX;
		else
			CP(buf, old, shm_nattch);
		CP(buf, old, shm_atime);
		CP(buf, old, shm_dtime);
		CP(buf, old, shm_ctime);
		old.shm_internal = NULL;
		error = copyout(&old, uap->buf, sizeof(old));
		break;
	}

done:
	if (error) {
		/* Invalidate the return value */
		td->td_retval[0] = -1;
	}
	return (error);
}
1642
1643#endif	/* COMPAT_FREEBSD4 || COMPAT_FREEBSD5 || COMPAT_FREEBSD6 ||
1644	   COMPAT_FREEBSD7 */
1645
1646static int
1647sysvshm_modload(struct module *module, int cmd, void *arg)
1648{
1649	int error = 0;
1650
1651	switch (cmd) {
1652	case MOD_LOAD:
1653		error = shminit();
1654		if (error != 0)
1655			shmunload();
1656		break;
1657	case MOD_UNLOAD:
1658		error = shmunload();
1659		break;
1660	case MOD_SHUTDOWN:
1661		break;
1662	default:
1663		error = EINVAL;
1664		break;
1665	}
1666	return (error);
1667}
1668
/* Module glue: register sysvshm with the kernel module system. */
static moduledata_t sysvshm_mod = {
	"sysvshm",
	&sysvshm_modload,
	NULL
};

DECLARE_MODULE(sysvshm, sysvshm_mod, SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);
1677