/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

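/*
 * Fold the upper bits of the uid into the lower ones so that both
 * small consecutive uids and large widely-spaced ones spread evenly
 * across the hash table.
 */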
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	((((uid) >> UIDHASH_BITS) + (uid)) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() does not expect that. Hence the irq-saving
 * spin_lock variants are used throughout instead of spin_lock_bh().
 */
static DEFINE_SPINLOCK(uidhash_lock);

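/*
 * root_user is the statically allocated user_struct for UID 0; it is
 * hashed at boot by uid_cache_init(), before any other user exists.
 */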
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.mq_bytes	= 0,
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_FAIR_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up,
						struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

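/*
 * On success uid_hash_find() takes a reference on the user_struct;
 * the caller must drop it with free_uid().
 */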
static inline struct user_struct *uid_hash_find(uid_t uid,
						struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

#ifdef CONFIG_FAIR_USER_SCHED

static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
static DEFINE_MUTEX(uids_mutex);

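/*
 * Under CONFIG_FAIR_USER_SCHED every user owns a scheduler task group,
 * so CPU time is shared between users rather than between individual
 * tasks. These helpers manage the lifetime of that group.
 */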
static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group();
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* return cpu shares held by the user */
ssize_t cpu_shares_show(struct kset *kset, char *buffer)
{
	struct user_struct *up = container_of(kset, struct user_struct, kset);

	return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
}

/* modify cpu shares held by the user */
ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size)
{
	struct user_struct *up = container_of(kset, struct user_struct, kset);
	unsigned long shares;
	int rc;

	if (sscanf(buffer, "%lu", &shares) != 1)
		return -EINVAL;

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

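/*
 * Initialize the subsys_attribute behind a per-user "cpu_share" file,
 * wiring it up to the show/store handlers above.
 */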
static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
{
	sa->attr.name = name;
	sa->attr.owner = NULL;
	sa->attr.mode = mode;
	sa->show = cpu_shares_show;
	sa->store = cpu_shares_store;
}

/* Create "/sys/kernel/uids/<uid>" directory and
 *  "/sys/kernel/uids/<uid>/cpu_share" file for this user.
 */
static int user_kobject_create(struct user_struct *up)
{
	struct kset *kset = &up->kset;
	struct kobject *kobj = &kset->kobj;
	int error;

	memset(kset, 0, sizeof(struct kset));
	kobj->parent = &uids_kobject;	/* create under /sys/kernel/uids dir */
	kobject_set_name(kobj, "%d", up->uid);
	kset_init(kset);
	user_attr_init(&up->user_attr, "cpu_share", 0644);

	error = kobject_add(kobj);
	if (error)
		goto done;

	error = sysfs_create_file(kobj, &up->user_attr.attr);
	if (error) {
		kobject_del(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);

done:
	return error;
}

/* create these in sysfs filesystem:
 * 	"/sys/kernel/uids" directory
 * 	"/sys/kernel/uids/0" directory (for root user)
 * 	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_kobject_init(void)
{
	int error;

	/* create under /sys/kernel dir */
	uids_kobject.parent = &kernel_subsys.kobj;
	uids_kobject.kset = &kernel_subsys;
	kobject_set_name(&uids_kobject, "uids");
	kobject_init(&uids_kobject);

	error = kobject_add(&uids_kobject);
	if (!error)
		error = user_kobject_create(&root_user);

	return error;
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	struct kobject *kobj = &up->kset.kobj;
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

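	/* Drop the reference that free_user() re-took before scheduling
	 * this work. If it is still the last one, unhash the user and
	 * tear everything down; otherwise the user_struct was revived
	 * in the meantime and must be left alone.
	 */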
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	sysfs_remove_file(kobj, &up->user_attr.attr);
	kobject_uevent(kobj, KOBJ_REMOVE);
	kobject_del(kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore the count; the deferred work will drop it again */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}

#else	/* CONFIG_FAIR_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }
static inline int user_kobject_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif	/* CONFIG_FAIR_USER_SCHED */

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

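/*
 * Drop a reference on a user_struct; the final put unhashes and frees
 * it (directly, or via deferred work under CONFIG_FAIR_USER_SCHED).
 * Safe to call with NULL and with local interrupts disabled.
 */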
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

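/*
 * Look up the user_struct for @uid in @ns, creating and hashing a new
 * one if none exists yet. Returns NULL if allocation or setup fails;
 * otherwise the caller owns a reference on the result.
 */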
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up;

	/* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new) {
			uids_mutex_unlock();
			return NULL;
		}
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif

		new->mq_bytes = 0;
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0) {
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		if (sched_create_user(new) < 0) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		if (user_kobject_create(new)) {
			sched_destroy_user(new);
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced with
		 * someone else adding the same user.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_FAIR_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;
}

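/*
 * Re-account the current task against @new_user after a uid change,
 * then drop the old user's reference once any concurrent signal
 * allocation is done looking at it.
 */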
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}

void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * Collapse the chains so that the user_structs are still alive,
	 * but no longer hashed. Subsequent free_uid() calls will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);