shm.c revision 3d3653f9
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <linux/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

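/*
 * Note: shm_file_data() below expands to an lvalue; it reinterprets
 * file->private_data as a struct shm_file_data pointer, so it can sit on
 * either side of an assignment (see shm_release() further down).
 */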
#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}
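/*
 * A quick way to exercise this interface from userspace (illustrative):
 *
 *	$ cat /proc/sysvipc/shm
 *
 * prints the header above followed by one row per segment, formatted by
 * sysvipc_shm_proc_show() at the bottom of this file.
 */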

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	/*
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * object pointer (as returned by ipc_lock()), and error out as
	 * appropriate.
	 */
	if (IS_ERR(ipcp))
		return (void *)ipcp;
	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static void __shm_free(struct shmid_kernel *shp)
{
	kvfree(shp);
}

static void shm_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
							rcu);
	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
							shm_perm);
	security_shm_free(shp);
	__shm_free(shp);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	list_del(&s->shm_clist);
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


static int __shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);

	if (IS_ERR(shp))
		return PTR_ERR(shp);

	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
	return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	int err = __shm_open(vma);
	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	WARN_ON_ONCE(err);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_user);
	fput(shm_file);
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}
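/*
 * A minimal sketch of the deferred-destroy lifecycle this implements
 * (illustrative userspace sequence, error handling omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);	// shm_nattch == 1
 *	shmctl(id, IPC_RMID, NULL);	// marks SHM_DEST, not yet destroyed
 *	shmdt(p);			// nattch hits 0 -> shm_destroy()
 */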

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
	struct shmid_kernel *shp, *n;

	if (list_empty(&task->sysvshm.shm_clist))
		return;

	/*
	 * If kernel.shm_rmid_forced is not set then only keep track of
	 * which shmids are orphaned, so that a later set of the sysctl
	 * can clean them up.
	 */
	if (!ns->shm_rmid_forced) {
		down_read(&shm_ids(ns).rwsem);
		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
			shp->shm_creator = NULL;
		/*
		 * Only under read lock but we are only called on current
		 * so no entry on the list will be shared.
		 */
		list_del(&task->sysvshm.shm_clist);
		up_read(&shm_ids(ns).rwsem);
		return;
	}

	/*
	 * Destroy all already created segments that were not yet mapped,
	 * and mark any mapped as orphan to cover the sysctl toggling.
	 * Destroy is skipped if shm_may_destroy() returns false.
	 */
	down_write(&shm_ids(ns).rwsem);
	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
		shp->shm_creator = NULL;

		if (shm_may_destroy(ns, shp)) {
			shm_lock_by_ptr(shp);
			shm_destroy(ns, shp);
		}
	}

	/* Remove the list head from any segments still attached. */
	list_del(&task->sysvshm.shm_clist);
	up_write(&shm_ids(ns).rwsem);
}
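/*
 * The sysctl referenced above is toggled from userspace, e.g.:
 *
 *	# echo 1 > /proc/sys/kernel/shm_rmid_forced
 *
 * Setting it also triggers shm_destroy_orphaned() (via the sysctl handler
 * in ipc_sysctl.c) so previously orphaned segments are reaped.
 */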

static int shm_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	/*
	 * In case of remap_file_pages() emulation, the file can represent
	 * removed IPC ID: propagate shm_lock() error to caller.
	 */
	ret = __shm_open(vma);
	if (ret)
		return ret;

	ret = call_mmap(sfd->file, vma);
	if (ret) {
		shm_close(vma);
		return ret;
	}
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	return 0;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

static struct shmid_kernel *shm_alloc(void)
{
	struct shmid_kernel *shp;

	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
	if (unlikely(!shp))
		return NULL;

	return shp;
}

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	if (ns->shm_tot + numpages < ns->shm_tot ||
			ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;
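
	/*
	 * Worked example of the checks above, assuming 4 KiB pages
	 * (illustrative numbers): size = 5000 rounds up to numpages = 2,
	 * and 2 << 12 = 8192 >= 5000, so the first check passes. A size
	 * near SIZE_MAX wraps in the rounding so numpages << PAGE_SHIFT
	 * ends up below size, and a saturated ns->shm_tot wraps in the
	 * addition; either overflow is rejected with -ENOSPC.
	 */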

	shp = shm_alloc();
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		__shm_free(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not skip accounting under OVERCOMMIT_NEVER, even
		 * if SHM_NORESERVE asks for it.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_kernel_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (error < 0)
		goto no_id;

	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
	return error;
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = shm_security,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
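/*
 * Minimal illustrative caller (userspace, not part of this file); assumes
 * a 1 MiB segment keyed via ftok(3) with 0600 permissions:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	key_t key = ftok("/some/path", 'A');
 *	int shmid = shmget(key, 1024 * 1024, IPC_CREAT | 0600);
 *	if (shmid < 0)
 *		perror("shmget");
 */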

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);

		spin_lock_irq(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock_irq(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}
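/*
 * Worked example for the hugetlb branch above (illustrative numbers):
 * with 2 MiB huge pages over a 4 KiB base page size, pages_per_huge_page()
 * is 512, so a mapping caching 4 huge pages adds 512 * 4 = 2048 base pages
 * to *rss_add; sysvipc_shm_proc_show() later scales this by PAGE_SIZE.
 */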

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}

static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);

		/* check if shm_destroy() is tearing down shp */
		if (!ipc_valid_object(&shp->shm_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();

			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid)) {
				err = -EPERM;
				goto out_unlock0;
			}
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
				err = -EPERM;
				goto out_unlock0;
			}
		}

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();

			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}
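/*
 * Illustrative userspace sequence for the SHM_LOCK/SHM_UNLOCK path above
 * (needs CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK; errors ignored):
 *
 *	shmctl(shmid, SHM_LOCK, NULL);		// pin segment pages in memory
 *	...
 *	shmctl(shmid, SHM_UNLOCK, NULL);	// let them be swapped again
 */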

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg,
	      ulong *raddr, unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr = (unsigned long)shmaddr;
	unsigned long size;
	struct file *file;
	int    err;
	unsigned long flags = MAP_SHARED;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;

	if (addr) {
		if (addr & (shmlba - 1)) {
			/*
			 * Round down to the nearest multiple of shmlba.
			 * For sane do_mmap_pgoff() parameters, avoid
			 * round downs that trigger nil-page and MAP_FIXED.
			 */
			if ((shmflg & SHM_RND) && addr >= shmlba)
				addr &= ~(shmlba - 1);
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}

		flags |= MAP_FIXED;
	} else if ((shmflg & SHM_REMAP))
		goto out;
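
	/*
	 * Rounding example for the SHM_RND case above (illustrative): with
	 * shmlba = 0x10000, an addr of 0x12345000 becomes
	 * 0x12345000 & ~0xffff = 0x12340000, the nearest lower multiple of
	 * shmlba; an unaligned addr without SHM_RND fails with -EINVAL.
	 */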

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(d_inode(path.dentry));
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	if (down_write_killable(&current->mm->mmap_sem)) {
		err = -EINTR;
		goto out_fput;
	}

	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (addr + size < addr)
			goto invalid;

		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
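/*
 * Minimal illustrative attach/detach round trip from userspace, reusing
 * the shmid from the shmget() sketch earlier (error handling omitted):
 *
 *	char *p = shmat(shmid, NULL, 0);	// kernel picks the address
 *	p[0] = 42;				// segment is now mapped
 *	shmdt(p);				// detach; see shmdt() below
 */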

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct file *file;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size and that are from the
	 *   same shm segment from which we determined the size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			/*
			 * Record the file of the shm segment being
			 * unmapped.  With mremap(), someone could place
			 * page from another segment but with equal offsets
			 * in the range we are unmapping.
			 */
			file = vma->vm_file;
			size = i_size_read(file_inode(vma->vm_file));
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
		    (vma->vm_file == file))
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
		vma = next;
	}

#else	/* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given
	 */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	seq_printf(s,
		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
		   "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
		   SIZE_SPEC " " SIZE_SPEC "\n",
		   shp->shm_perm.key,
		   shp->shm_perm.id,
		   shp->shm_perm.mode,
		   shp->shm_segsz,
		   shp->shm_cprid,
		   shp->shm_lprid,
		   shp->shm_nattch,
		   from_kuid_munged(user_ns, shp->shm_perm.uid),
		   from_kgid_munged(user_ns, shp->shm_perm.gid),
		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
		   shp->shm_atim,
		   shp->shm_dtim,
		   shp->shm_ctim,
		   rss * PAGE_SIZE,
		   swp * PAGE_SIZE);

	return 0;
}
#endif