/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <linux/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

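/*
 * The wrapper file for an attached segment keeps a struct shm_file_data
 * in file->private_data; this macro is an lvalue alias for that pointer,
 * so it can be both read and assigned through.
 */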
#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not let it be found any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * The shm_lock() routine is called in paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	/*
	 * We raced in the idr lookup or with shm_destroy().  Either way, the
	 * ID is busted.
	 */
	WARN_ON(IS_ERR(ipcp));

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

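/*
 * RCU callback: runs after a grace period, once no RCU-protected lookup
 * can still reach the segment.  Frees the security blob first and then
 * the ipc_rcu allocation itself.
 */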
static void shm_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct shmid_kernel *shp = ipc_rcu_to_struct(p);

	security_shm_free(shp);
	ipc_rcu_free(head);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	list_del(&s->shm_clist);
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_user);
	fput(shm_file);
	ipc_rcu_putref(shp, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * Remove the attach descriptor vma and free the segment's memory if it
 * is marked destroyed.  The descriptor has already been removed from
 * the current->mm->mmap list and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments that have no users and whose
	 * originating process has already exited.
	 *
	 * As the shp->* fields are only changed under the rwsem, it is
	 * safe to skip taking the shp lock.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
	struct shmid_kernel *shp, *n;

	if (list_empty(&task->sysvshm.shm_clist))
		return;

	/*
	 * If kernel.shm_rmid_forced is not set then only keep track of
	 * which shmids are orphaned, so that a later setting of the
	 * sysctl can clean them up.
	 */
	if (!ns->shm_rmid_forced) {
		down_read(&shm_ids(ns).rwsem);
		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
			shp->shm_creator = NULL;
		/*
		 * We only hold the read lock, but since we are only
		 * called on current, no entry on the list can be
		 * touched concurrently.
		 */
		list_del(&task->sysvshm.shm_clist);
		up_read(&shm_ids(ns).rwsem);
		return;
	}

	/*
	 * Destroy all segments that were created but never mapped, and
	 * mark any mapped ones as orphaned to cover the sysctl toggling.
	 * Destruction is skipped where shm_may_destroy() returns false.
	 */
	down_write(&shm_ids(ns).rwsem);
	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
		shp->shm_creator = NULL;

		if (shm_may_destroy(ns, shp)) {
			shm_lock_by_ptr(shp);
			shm_destroy(ns, shp);
		}
	}

	/* Remove the list head from any segments still attached. */
	list_del(&task->sysvshm.shm_clist);
	up_write(&shm_ids(ns).rwsem);
}

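/*
 * The vm_ops installed by shm_mmap() below are thin wrappers: faults
 * (and, under NUMA, mempolicy operations) are forwarded to the vm_ops
 * of the underlying shmem or hugetlbfs file recorded in shm_file_data.
 */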
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

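/*
 * mmap the underlying shmem/hugetlbfs file, remember its vm_ops, and
 * then substitute shm_vm_ops so that open/close of the vma keep the
 * segment's attach count (and destroy-on-last-detach) working.
 */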
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

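/*
 * Called when the last reference to the wrapper file is dropped:
 * release the namespace reference and free the per-file data.
 */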
static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

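	/*
	 * Guard against overflow when rounding the size up to pages, and
	 * against wrapping or exceeding the namespace-wide page total.
	 */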
	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	if (ns->shm_tot + numpages < ns->shm_tot ||
			ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp, ipc_rcu_free);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow unaccounted (SHM_NORESERVE) segments when
		 * overcommit is OVERCOMMIT_NEVER, even if asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_kernel_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;
	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	ipc_rcu_putref(shp, shm_rcu_free);
	return error;
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = shm_security,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

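/*
 * The copy_* helpers below translate between the modern shmid64_ds
 * layout (IPC_64) and the legacy shmid_ds/shminfo ABI (IPC_OLD) that
 * old binaries still pass to shmctl().
 */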
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: the caller must hold no locks; the rwsem is taken inside this
 * function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}

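/*
 * Handles the read-only shmctl commands (IPC_INFO, SHM_INFO, SHM_STAT,
 * IPC_STAT).  No ipc object lock is taken; the *_STAT paths run under
 * RCU and the *_INFO paths only take the rwsem as a reader.
 */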
static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
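	/*
	 * SHM_LOCK/SHM_UNLOCK pin or unpin the segment's pages in memory
	 * via shmem_lock(); hugetlb-backed segments are always resident,
	 * so for them the commands fall through as a no-op below.
	 */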
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);

		/* check if shm_destroy() is tearing down shp */
		if (!ipc_valid_object(&shp->shm_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();
			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid)) {
				err = -EPERM;
				goto out_unlock0;
			}
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
				err = -EPERM;
				goto out_unlock0;
			}
		}

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
	      unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int    err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(shmlba - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(d_inode(path.dentry));
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

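	/*
	 * From here until out_nattch the segment cannot go away: the
	 * raised shm_nattch keeps shm_may_destroy() false, so it is safe
	 * to work with all locks dropped while setting up the mapping.
	 */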
	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (addr + size < addr)
			goto invalid;

		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
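
/*
 * For reference, the userspace lifecycle these entry points implement
 * is, schematically (an illustrative sketch, not code in this file):
 *
 *	int id = shmget(key, size, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	... use the mapping at p ...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */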

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct file *file;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size and that are from the
	 *   same shm segment from which we determined the size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			/*
			 * Record the file of the shm segment being
			 * unmapped.  With mremap(), someone could have
			 * placed a page from another segment, but with
			 * equal offsets, in the range we are unmapping.
			 */
			file = vma->vm_file;
			size = i_size_read(file_inode(vma->vm_file));
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
		    (vma->vm_file == file))
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* Under NOMMU conditions, the exact address to be destroyed
	 * must be given. */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	seq_printf(s,
		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
		   "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
		   SIZE_SPEC " " SIZE_SPEC "\n",
		   shp->shm_perm.key,
		   shp->shm_perm.id,
		   shp->shm_perm.mode,
		   shp->shm_segsz,
		   shp->shm_cprid,
		   shp->shm_lprid,
		   shp->shm_nattch,
		   from_kuid_munged(user_ns, shp->shm_perm.uid),
		   from_kgid_munged(user_ns, shp->shm_perm.gid),
		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
		   shp->shm_atim,
		   shp->shm_dtim,
		   shp->shm_ctim,
		   rss * PAGE_SIZE,
		   swp * PAGE_SIZE);

	return 0;
}
#endif
1369