shm.c revision b7952180
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * Support for audit of IPC object properties and permission changes,
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * Namespaces support,
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

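/*
 * Each shmat() wraps the segment's backing shmem (or hugetlbfs) file in
 * a fresh struct file; this descriptor hangs off that wrapper file's
 * ->private_data and carries everything needed to delegate operations
 * to the real file and to find the segment again.
 */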
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

void __init shm_init(void)
{
	shm_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* Called by fork() for every inherited attach, and by shm_mmap(). */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
						shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

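/*
 * Page faults are simply forwarded to the backing file's fault handler,
 * which shm_mmap() saved in sfd->vm_ops.
 */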
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

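/*
 * Let the backing file establish the mapping, then interpose shm_vm_ops
 * so that attach/detach bookkeeping (shm_nattch, atime/dtime) stays
 * accurate across fork() and munmap().
 */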
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, datasync);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
};

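/*
 * The hugetlb variant always provides ->get_unmapped_area (not only on
 * NOMMU), since the backing hugetlbfs file must be mapped at a
 * huge-page-aligned address.
 */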
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	int acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, size, acctflag,
					&shp->mlock_user, HUGETLB_SHMFS_INODE);
	} else {
		/*
		 * Under OVERCOMMIT_NEVER the mapping is always accounted,
		 * even if SHM_NORESERVE asked for no accounting.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
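
/*
 * For illustration only (editor's sketch, not part of the original
 * source): a minimal userspace round trip through the syscalls
 * implemented in this file. Error handling is elided.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *	#include <string.h>
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	// attach read/write
 *	strcpy(p, "hello");		// backed by shmem pages
 *	shmdt(p);			// detach; segment persists
 *	shmctl(id, IPC_RMID, NULL);	// mark for destruction
 */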

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader.
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = shp->shm_file->f_path.dentry->d_inode;

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);

		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);

		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rw_mutex held as a reader.
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: called with no locks held; the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock;
	switch (cmd) {
	case IPC_RMID:
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&shmid64.shm_perm, ipcp);
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	shm_unlock(shp);
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		lru_add_drain_all();  /* drain pagevecs to lru lists */

		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		audit_ipc_obj(&(shp->shm_perm));

		if (!capable(CAP_IPC_LOCK)) {
			uid_t euid = current_euid();

			err = -EPERM;
			if (euid != shp->shm_perm.uid &&
			    euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();

			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	case IPC_SET:
		err = shmctl_down(ns, shmid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	shm_unlock(shp);
out:
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

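	/*
	 * Pin the segment by bumping shm_nattch before dropping the ipc
	 * lock: a racing IPC_RMID then only marks it SHM_DEST instead of
	 * destroying it under us. The out_nattch path drops the count
	 * again and does the final destroy if we were the last user.
	 */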
	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	if (!file)
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap(file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	path_put(&path);
	goto out_nattch;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/*
	 * Under NOMMU conditions, the exact address to be destroyed must be
	 * given.
	 */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
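/*
 * Emit one /proc/sysvipc/shm row; the column layout must stay in sync
 * with the header string passed to ipc_init_proc_interface() in
 * shm_init() above.
 */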
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif