/*
 * linux/ipc/namespace.c
 * Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc.
 */

#include <linux/ipc.h>
#include <linux/msg.h>
#include <linux/ipc_namespace.h>
#include <linux/rcupdate.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mount.h>

#include "util.h"

static struct ipc_namespace *create_ipc_ns(void)
{
	struct ipc_namespace *ns;
	int err;

	ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL);
	if (ns == NULL)
		return ERR_PTR(-ENOMEM);

	atomic_set(&ns->count, 1);
	err = mq_init_ns(ns);
	if (err) {
		kfree(ns);
		return ERR_PTR(err);
	}
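	/*
	 * Bump nr_ipc_ns before the per-subsystem init below: msgmni for
	 * the new namespace is derived from the number of ipc namespaces
	 * (see the recomputing-msgmni comments in this file), so the new
	 * namespace must already be counted when it is computed.
	 */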
	atomic_inc(&nr_ipc_ns);

	sem_init_ns(ns);
	msg_init_ns(ns);
	shm_init_ns(ns);

	/*
	 * msgmni has already been computed for the new ipc ns.
	 * Thus, do the ipcns creation notification before registering that
	 * new ipcns in the chain.
	 */
	ipcns_notify(IPCNS_CREATED);
	register_ipcns_notifier(ns);

	return ns;
}

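/*
 * copy_ipcs - create a new ipc namespace if requested
 * @flags: the clone flags
 * @ns:    the current ipc namespace
 *
 * With CLONE_NEWIPC a fresh namespace is created; otherwise the caller
 * just gets another reference to @ns.
 */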
struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns)
{
	if (!(flags & CLONE_NEWIPC))
		return get_ipc_ns(ns);
	return create_ipc_ns();
}

/*
 * free_ipcs - free all ipcs of one type
 * @ns:   the namespace to remove the ipcs from
 * @ids:  the table of ipcs to free
 * @free: the function called to free each individual ipc
 *
 * Called for each kind of ipc when an ipc_namespace exits.
 */
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
	       void (*free)(struct ipc_namespace *, struct kern_ipc_perm *))
{
	struct kern_ipc_perm *perm;
	int next_id;
	int total, in_use;

	down_write(&ids->rw_mutex);

	in_use = ids->in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		perm = idr_find(&ids->ipcs_idr, next_id);
		if (perm == NULL)
			continue;
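		/*
		 * The callback is entered with the ipc locked; it is
		 * expected to remove the id and release the lock itself,
		 * which is why there is no matching unlock here.
		 */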
		ipc_lock_by_ptr(perm);
		free(ns, perm);
		total++;
	}
	up_write(&ids->rw_mutex);
}

static void free_ipc_ns(struct ipc_namespace *ns)
{
	/*
	 * Unregistering the hotplug notifier at the beginning guarantees
	 * that the ipc namespace won't be freed while we are inside the
	 * callback routine. Since the blocking_notifier_chain_XXX routines
	 * hold a rw lock on the notifier list, unregister_ipcns_notifier()
	 * won't take the rw lock before blocking_notifier_call_chain() has
	 * released the rd lock.
	 */
	unregister_ipcns_notifier(ns);
	sem_exit_ns(ns);
	msg_exit_ns(ns);
	shm_exit_ns(ns);
	kfree(ns);
	atomic_dec(&nr_ipc_ns);

	/*
	 * Do the ipcns removal notification after decrementing nr_ipc_ns in
	 * order to have a correct value when recomputing msgmni.
	 */
	ipcns_notify(IPCNS_REMOVED);
}

/*
 * put_ipc_ns - drop a reference to an ipc namespace.
 * @ns: the namespace to put
 *
 * If this is the last task in the namespace exiting, and
 * it is dropping the refcount to 0, then it can race with
 * a task in another ipc namespace but in a mounts namespace
 * which has this ipcns's mqueuefs mounted, doing some action
 * with one of the mqueuefs files.  That can raise the refcount.
 * So dropping the refcount, and raising the refcount when
 * accessing it through the VFS, are protected with mq_lock.
 *
 * (Clearly, a task raising the refcount on its own ipc_ns
 * needn't take mq_lock since it can't race with the last task
 * in the ipcns exiting).
 */
void put_ipc_ns(struct ipc_namespace *ns)
{
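	/*
	 * atomic_dec_and_lock() takes mq_lock only when the count drops
	 * to zero, so the teardown below runs at most once and is
	 * serialized against mqueuefs lookups that take a reference
	 * under the same lock (see the comment above).
	 */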
	if (atomic_dec_and_lock(&ns->count, &mq_lock)) {
		mq_clear_sbinfo(ns);
		spin_unlock(&mq_lock);
		mq_put_mnt(ns);
		free_ipc_ns(ns);
	}
}