// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_ns.h>
#include <linux/reboot.h>
#include <linux/export.h>
#include <linux/sched/task.h>
#include <linux/sched/signal.h>
#include <linux/idr.h>
#include <uapi/linux/wait.h>
#include "pid_sysctl.h"

static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;
/* Write-once array, filled from the beginning. */
static struct kmem_cache *pid_cache[MAX_PID_NS_LEVEL];

/*
 * Creates the kmem_cache that pids at the given namespace level are
 * allocated from.
 * @level: pid namespace level
 */
static struct kmem_cache *create_pid_cachep(unsigned int level)
{
	/* Level 0 is init_pid_ns.pid_cachep */
	struct kmem_cache **pkc = &pid_cache[level - 1];
	struct kmem_cache *kc;
	char name[4 + 10 + 1];	/* "pid_" + up to 10 digits + '\0' */
	unsigned int len;

	kc = READ_ONCE(*pkc);
	if (kc)
		return kc;

	snprintf(name, sizeof(name), "pid_%u", level + 1);
	len = struct_size_t(struct pid, numbers, level + 1);
	mutex_lock(&pid_caches_mutex);
	/* A racing task may have won; recheck under the mutex to avoid a name collision. */
	if (!*pkc)
		*pkc = kmem_cache_create(name, len, 0,
					 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
	mutex_unlock(&pid_caches_mutex);
	/* Our attempt may have failed, but a concurrent one may have succeeded. */
	return READ_ONCE(*pkc);
}
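
/*
 * Worked example (illustrative): a pid in a level-2 namespace is
 * visible at levels 0, 1 and 2, so each struct pid allocated for it
 * needs three struct upid slots.  create_pid_cachep(2) thus returns a
 * cache named "pid_3" whose object size is
 * struct_size_t(struct pid, numbers, 3).  The pid allocator can then
 * do something like:
 *
 *	struct pid *pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
 *
 * with ns->pid_cachep filled in by create_pid_namespace() below.
 */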

static struct ucounts *inc_pid_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_PID_NAMESPACES);
}

static void dec_pid_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_PID_NAMESPACES);
}

static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
	struct pid_namespace *parent_pid_ns)
{
	struct pid_namespace *ns;
	unsigned int level = parent_pid_ns->level + 1;
	struct ucounts *ucounts;
	int err;

	err = -EINVAL;
	if (!in_userns(parent_pid_ns->user_ns, user_ns))
		goto out;

	err = -ENOSPC;
	if (level > MAX_PID_NS_LEVEL)
		goto out;
	ucounts = inc_pid_namespaces(user_ns);
	if (!ucounts)
		goto out;

	err = -ENOMEM;
	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out_dec;

	idr_init(&ns->idr);

	ns->pid_cachep = create_pid_cachep(level);
	if (ns->pid_cachep == NULL)
		goto out_free_idr;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto out_free_idr;
	ns->ns.ops = &pidns_operations;

	refcount_set(&ns->ns.count, 1);
	ns->level = level;
	ns->parent = get_pid_ns(parent_pid_ns);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;
	ns->pid_allocated = PIDNS_ADDING;
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
	ns->memfd_noexec_scope = pidns_memfd_noexec_scope(parent_pid_ns);
#endif
	return ns;

out_free_idr:
	idr_destroy(&ns->idr);
	kmem_cache_free(pid_ns_cachep, ns);
out_dec:
	dec_pid_namespaces(ucounts);
out:
	return ERR_PTR(err);
}

static void delayed_free_pidns(struct rcu_head *p)
{
	struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu);

	dec_pid_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);

	kmem_cache_free(pid_ns_cachep, ns);
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
	ns_free_inum(&ns->ns);

	idr_destroy(&ns->idr);
	call_rcu(&ns->rcu, delayed_free_pidns);
}

struct pid_namespace *copy_pid_ns(unsigned long flags,
	struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
	if (!(flags & CLONE_NEWPID))
		return get_pid_ns(old_ns);
	if (task_active_pid_ns(current) != old_ns)
		return ERR_PTR(-EINVAL);
	return create_pid_namespace(user_ns, old_ns);
}
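
/*
 * Userspace sketch (illustrative, needs CAP_SYS_ADMIN in the owning
 * user namespace): copy_pid_ns() is reached via clone(2) or
 * unshare(2) with CLONE_NEWPID.  With unshare() the caller keeps its
 * own pid namespace; only its children enter the new one, and the
 * first child becomes that namespace's init:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		if (unshare(CLONE_NEWPID))
 *			return 1;
 *		if (fork() == 0) {
 *			// child: pid 1 in the new namespace
 *			printf("pid=%d\n", getpid());
 *			_exit(0);
 *		}
 *		return 0;
 *	}
 */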

void put_pid_ns(struct pid_namespace *ns)
{
	struct pid_namespace *parent;

	while (ns != &init_pid_ns) {
		parent = ns->parent;
		if (!refcount_dec_and_test(&ns->ns.count))
			break;
		destroy_pid_namespace(ns);
		ns = parent;
	}
}
EXPORT_SYMBOL_GPL(put_pid_ns);

void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task, *me = current;
	int init_pids = thread_group_leader(me) ? 1 : 2;
	struct pid *pid;

	/* Don't allow any more processes into the pid namespace */
	disable_pid_allocation(pid_ns);

	/*
	 * Ignore SIGCHLD causing any terminated children to autoreap.
	 * This speeds up the namespace shutdown, plus see the comment
	 * below.
	 */
	spin_lock_irq(&me->sighand->siglock);
	me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
	spin_unlock_irq(&me->sighand->siglock);

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find the remaining pids in the namespace, signal them and wait
	 * for them to exit.
	 *
	 * Note:  This signals each thread in the namespace - even those that
	 *	  belong to the same thread group. To avoid this, we would have
	 *	  to walk the entire tasklist looking for processes in this
	 *	  namespace, but that could be unnecessarily expensive if the
	 *	  pid namespace has just a few processes. Or we would need to
	 *	  maintain a tasklist for each pid namespace.
	 */
	rcu_read_lock();
	read_lock(&tasklist_lock);
	nr = 2;
	idr_for_each_entry_continue(&pid_ns->idr, pid, nr) {
		task = pid_task(pid, PIDTYPE_PID);
		if (task && !__fatal_signal_pending(task))
			group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX);
	}
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	/*
	 * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD.
	 * kernel_wait4() will also block until our children traced from the
	 * parent namespace are detached and become EXIT_DEAD.
	 */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = kernel_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	/*
	 * kernel_wait4() misses EXIT_DEAD children, and EXIT_ZOMBIE
	 * processes whose parents are outside of the pid namespace.
	 * Such processes are created with setns()+fork().
	 *
	 * If those EXIT_ZOMBIE processes are not reaped by their
	 * parents before their parents exit, they will be reparented
	 * to pid_ns->child_reaper.  Thus pid_ns->child_reaper needs to
	 * stay valid until they all go away.
	 *
	 * The code relies on the pid_ns->child_reaper ignoring
	 * SIGCHLD to cause those EXIT_ZOMBIE processes to be
	 * autoreaped if reparented.
	 *
	 * Semantically it is also desirable to wait for EXIT_ZOMBIE
	 * processes before allowing the child_reaper to be reaped, as
	 * that gives the invariant that when the init process of a
	 * pid namespace is reaped all of the processes in the pid
	 * namespace are gone.
	 *
	 * Once all of the other tasks are gone from the pid_namespace
	 * free_pid() will awaken this task.
	 */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (pid_ns->pid_allocated == init_pids)
			break;
		/*
		 * Release tasks_rcu_exit_srcu to avoid the following deadlock:
		 *
		 * 1) TASK A unshare(CLONE_NEWPID)
		 * 2) TASK A fork() twice -> TASK B (child reaper for new ns)
		 *    and TASK C
		 * 3) TASK B exits, kills TASK C, waits for TASK A to reap it
		 * 4) TASK A calls synchronize_rcu_tasks()
		 *                   -> synchronize_srcu(tasks_rcu_exit_srcu)
		 * 5) *DEADLOCK*
		 *
		 * It is considered safe to release tasks_rcu_exit_srcu here
		 * because we assume the current task can not be concurrently
		 * reaped at this point.
		 */
		exit_tasks_rcu_stop();
		schedule();
		exit_tasks_rcu_start();
	}
	__set_current_state(TASK_RUNNING);

	if (pid_ns->reboot)
		current->signal->group_exit_code = pid_ns->reboot;

	acct_exit_ns(pid_ns);
}
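
/*
 * Observable effect (a sketch of the intended behaviour, not code from
 * this file): once the init task of a pid namespace exits, every
 * remaining task in the namespace is SIGKILLed above and no new pids
 * can be allocated in it.  From the parent namespace:
 *
 *	kill(ns_init_pid, SIGKILL);	// kill the namespace's init
 *	waitpid(ns_init_pid, &status, 0);
 *	// by now all other tasks of that namespace have been killed
 *	// as a side effect, and fork() into it can no longer succeed
 */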

#ifdef CONFIG_CHECKPOINT_RESTORE
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	struct ctl_table tmp = *table;
	int ret, next;

	if (write && !checkpoint_restore_ns_capable(pid_ns->user_ns))
		return -EPERM;

	next = idr_get_cursor(&pid_ns->idr) - 1;

	tmp.data = &next;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (!ret && write)
		idr_set_cursor(&pid_ns->idr, next + 1);

	return ret;
}

extern int pid_max;
static struct ctl_table pid_ns_ctl_table[] = {
	{
		.procname = "ns_last_pid",
		.maxlen = sizeof(int),
		.mode = 0666, /* permissions are checked in the handler */
		.proc_handler = pid_ns_ctl_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &pid_max,
	},
	{ }
};
#endif	/* CONFIG_CHECKPOINT_RESTORE */
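
/*
 * Usage sketch (checkpoint/restore style; assumes the writer has
 * CAP_CHECKPOINT_RESTORE over the pid namespace's owning user ns):
 * writing N to /proc/sys/kernel/ns_last_pid sets the idr cursor to
 * N + 1, so the next pid allocated in the writer's active namespace
 * is N + 1:
 *
 *	FILE *f = fopen("/proc/sys/kernel/ns_last_pid", "w");
 *	if (f) {
 *		fprintf(f, "%d", 9999);	// want the next fork() to get 10000
 *		fclose(f);
 *	}
 *	pid_t child = fork();	// 10000, barring concurrent allocations
 */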

int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
	if (pid_ns == &init_pid_ns)
		return 0;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART2:
	case LINUX_REBOOT_CMD_RESTART:
		pid_ns->reboot = SIGHUP;
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
	case LINUX_REBOOT_CMD_HALT:
		pid_ns->reboot = SIGINT;
		break;
	default:
		return -EINVAL;
	}

	read_lock(&tasklist_lock);
	send_sig(SIGKILL, pid_ns->child_reaper, 1);
	read_unlock(&tasklist_lock);

	do_exit(0);

	/* Not reached */
	return 0;
}
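
/*
 * Usage sketch (illustrative): reboot(2) inside a pid namespace does
 * not touch the machine.  The namespace init is SIGKILLed here, and
 * zap_pid_ns_processes() above turns pid_ns->reboot into the init's
 * exit status, so a waiter in the parent namespace sees the requested
 * command as a termination signal:
 *
 *	// in the namespace's init:
 *	reboot(LINUX_REBOOT_CMD_RESTART);
 *
 *	// in the parent namespace:
 *	waitpid(ns_init_pid, &status, 0);
 *	if (WIFSIGNALED(status) && WTERMSIG(status) == SIGHUP)
 *		;	// the namespace requested a restart
 */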

static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
{
	return container_of(ns, struct pid_namespace, ns);
}

static struct ns_common *pidns_get(struct task_struct *task)
{
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(task);
	if (ns)
		get_pid_ns(ns);
	rcu_read_unlock();

	return ns ? &ns->ns : NULL;
}

static struct ns_common *pidns_for_children_get(struct task_struct *task)
{
	struct pid_namespace *ns = NULL;

	task_lock(task);
	if (task->nsproxy) {
		ns = task->nsproxy->pid_ns_for_children;
		get_pid_ns(ns);
	}
	task_unlock(task);

	if (ns) {
		read_lock(&tasklist_lock);
		if (!ns->child_reaper) {
			put_pid_ns(ns);
			ns = NULL;
		}
		read_unlock(&tasklist_lock);
	}

	return ns ? &ns->ns : NULL;
}
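
/*
 * Note on observable behaviour (a sketch of the intended semantics,
 * not code from this file): right after unshare(CLONE_NEWPID) the new
 * namespace has no child_reaper until the first child is forked, so
 * the check above keeps /proc/self/ns/pid_for_children unopenable
 * during that window:
 *
 *	unshare(CLONE_NEWPID);
 *	fd = open("/proc/self/ns/pid_for_children", O_RDONLY);
 *	// fails while the namespace has no init
 *	pid = fork();	// first child becomes pid 1 / child_reaper
 *	fd = open("/proc/self/ns/pid_for_children", O_RDONLY);
 *	// succeeds while that init is alive
 */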

static void pidns_put(struct ns_common *ns)
{
	put_pid_ns(to_pid_ns(ns));
}

static int pidns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *ancestor, *new = to_pid_ns(ns);

	if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Only allow entering the current active pid namespace
	 * or a child of the current active pid namespace.
	 *
	 * This is required for fork to return a usable pid value and
	 * this maintains the property that processes and their
	 * children can not escape their current pid namespace.
	 */
	if (new->level < active->level)
		return -EINVAL;

	ancestor = new;
	while (ancestor->level > active->level)
		ancestor = ancestor->parent;
	if (ancestor != active)
		return -EINVAL;

	put_pid_ns(nsproxy->pid_ns_for_children);
	nsproxy->pid_ns_for_children = get_pid_ns(new);
	return 0;
}
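
/*
 * Usage sketch (illustrative; 1234 is a hypothetical pid of a task in
 * the namespace being joined): pidns_install() is reached via
 * setns(2).  Joining only affects children created afterwards; the
 * caller's own pid namespace and pid are unchanged:
 *
 *	int fd = open("/proc/1234/ns/pid", O_RDONLY);
 *	if (fd >= 0 && setns(fd, CLONE_NEWPID) == 0) {
 *		// getpid() still reports the old value here; only
 *		// pid_ns_for_children was switched above
 *		if (fork() == 0)
 *			_exit(0);	// this child lives in the joined ns
 *	}
 */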

static struct ns_common *pidns_get_parent(struct ns_common *ns)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *pid_ns, *p;

	/* See if the parent is in the current namespace */
	pid_ns = p = to_pid_ns(ns)->parent;
	for (;;) {
		if (!p)
			return ERR_PTR(-EPERM);
		if (p == active)
			break;
		p = p->parent;
	}

	return &get_pid_ns(pid_ns)->ns;
}
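
/*
 * Usage sketch (illustrative; 4321 is a hypothetical task living in a
 * child pid namespace of ours): userspace reaches pidns_get_parent()
 * through the NS_GET_PARENT ioctl on an nsfs file descriptor (see
 * ioctl_ns(2)).  The walk above demands that the parent lie at or
 * below the caller's active namespace, so e.g. asking for the parent
 * of one's own namespace yields EPERM:
 *
 *	#include <linux/nsfs.h>
 *	#include <sys/ioctl.h>
 *
 *	int fd = open("/proc/4321/ns/pid", O_RDONLY);
 *	int parent_fd = ioctl(fd, NS_GET_PARENT);
 *	// parent_fd refers to that namespace's parent (here: our own
 *	// namespace); ioctl() returns -1/EPERM for unreachable parents
 */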

static struct user_namespace *pidns_owner(struct ns_common *ns)
{
	return to_pid_ns(ns)->user_ns;
}

const struct proc_ns_operations pidns_operations = {
	.name		= "pid",
	.type		= CLONE_NEWPID,
	.get		= pidns_get,
	.put		= pidns_put,
	.install	= pidns_install,
	.owner		= pidns_owner,
	.get_parent	= pidns_get_parent,
};

const struct proc_ns_operations pidns_for_children_operations = {
	.name		= "pid_for_children",
	.real_ns_name	= "pid",
	.type		= CLONE_NEWPID,
	.get		= pidns_for_children_get,
	.put		= pidns_put,
	.install	= pidns_install,
	.owner		= pidns_owner,
	.get_parent	= pidns_get_parent,
};

static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC | SLAB_ACCOUNT);

#ifdef CONFIG_CHECKPOINT_RESTORE
	register_sysctl_init("kernel", pid_ns_ctl_table);
#endif

	register_pid_ns_sysctl_table_vm();
	return 0;
}

__initcall(pid_namespaces_init);