#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash list keyed on the middle bits of the
 * watched inode's address.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * Its MSB is stolen to mark taggings that we might have to revert -
 * several operations have very unpleasant cleanup logic and that makes
 * a difference.  Some.
 */
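/*
 * Lock ordering (outermost first): audit_cmd_mutex and audit_filter_mutex
 * are taken before any spinlock; the fsnotify mark lock (entry->lock)
 * nests outside hash_lock, which is the innermost lock and guards the
 * chunk hash and the per-tree chunk lists.
 */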

static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static void __put_tree(struct rcu_head *rcu)
{
	struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
	kfree(tree);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		call_rcu(&tree->head, __put_tree);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

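/*
 * Hash on the inode address: dividing by L1_CACHE_BYTES discards the
 * low-order bits of the slab-allocated pointer so that its middle bits
 * pick the bucket.
 */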
static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->i.inode)
		return;
	list = chunk_hash(entry->i.inode);
	list_add_rcu(&chunk->hash, list);
}

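/*
 * Find the chunk attached to @inode and take a reference on it; the
 * caller drops that reference via audit_put_chunk().
 */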
/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.i.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

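/*
 * Recover the containing chunk from a node pointer: the low bits of
 * node.index give the node's position in chunk->owners[], so stepping
 * back that many entries lands on owners[0], which container_of() maps
 * to the chunk.
 */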
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

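/*
 * Drop node *p's tree from the chunk that owns it.  Called with hash_lock
 * held; the lock is dropped and retaken here, so callers must revalidate
 * their list position afterwards.  An emptied chunk is destroyed outright;
 * otherwise the chunk is replaced by a new one with one owner fewer.
 */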
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	spin_lock(&entry->lock);
	if (chunk->dead || !entry->i.inode) {
		spin_unlock(&entry->lock);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry);
		fsnotify_put_mark(entry);
		goto out;
	}

	new = alloc_chunk(size);
	if (!new)
		goto Fallback;
	fsnotify_duplicate_mark(&new->mark, entry);
	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
		free_chunk(new);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_destroy_mark(entry);
	fsnotify_put_mark(entry);
	goto out;

Fallback:
	/* do the best we can */
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

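/*
 * Attach a brand new single-owner chunk to @inode on behalf of @tree.
 * Used when the inode carries no audit mark yet.
 */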
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		free_chunk(chunk);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	return 0;
}

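/*
 * Make @tree an owner of the chunk attached to @inode, replacing any
 * existing chunk with a copy that has room for one more owner; falls back
 * to create_chunk() when the inode carries no mark yet.
 */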
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	spin_lock(&old_entry->lock);
	if (!old_entry->i.inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	fsnotify_duplicate_mark(chunk_entry, old_entry);
	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		free_chunk(chunk);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/*
	 * Even though we hold old_entry->lock, this is safe since
	 * chunk_entry->lock could NEVER have been grabbed before.
	 */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);

		fsnotify_destroy_mark(chunk_entry);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	fsnotify_destroy_mark(old_entry);
	fsnotify_put_mark(old_entry); /* pair to fsnotify_find_inode_mark() */
	fsnotify_put_mark(old_entry); /* and kill it */
	return 0;
}

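/*
 * Detach and free every rule that refers to @tree, emitting an
 * AUDIT_CONFIG_CHANGE record for each fully instantiated rule.  Called
 * with audit_filter_mutex held.
 */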
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;
	struct audit_buffer *ab;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
			audit_log_format(ab, "op=");
			audit_log_string(ab, "remove rule");
			audit_log_format(ab, " dir=");
			audit_log_untrustedstring(ab, rule->tree->pathname);
			audit_log_key(ab, rule->filterkey);
			audit_log_format(ab, " list=%d res=1", rule->listnr);
			audit_log_end(ab);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

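/*
 * Chunks whose node.index still carries the "will prune" bit are moved to
 * the front of tree->chunks and untagged; if that leaves the tree without
 * a root, the tree itself is killed.
 */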
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

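/*
 * Detach a rule from its tree.  If that was the last rule referring to the
 * tree, mark the tree a goner, queue it on prune_list and kick the prune
 * thread.  Returns 1 if the rule was attached to a tree.
 */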
/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return mnt->mnt_root->d_inode == arg;
}

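/*
 * Walk every tree on tree_list and drop the chunks whose inode is no
 * longer the root of any mount under the tree's pathname.  The on-stack
 * cursor keeps our place on the list while audit_filter_mutex is dropped
 * for the filesystem work.
 */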
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (!root_mnt)
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.i.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		put_tree(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

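/*
 * Validate a directory-tree rule and allocate its audit_tree: only
 * absolute paths on the exit filter list with an equality comparator are
 * accepted.
 */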
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(mnt->mnt_root->d_inode, arg);
}

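/*
 * Attach a rule to an audit tree.  A tree already on tree_list with the
 * same pathname is reused; otherwise the seed tree from audit_make_tree()
 * is added to tree_list and every mount under its pathname is tagged.
 */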
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (!mnt) {
		err = -ENOMEM;
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

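/*
 * Used when two paths are declared equivalent: every tree whose watched
 * directory contains @old is also tagged with the mounts collected under
 * @new.  Trees handled by the first pass are moved ahead of the on-stack
 * barrier entry; the second pass then commits the new tags, or trims them
 * if tagging failed.
 */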
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (!tagged)
		return -ENOMEM;

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
	return 0;
}

static void audit_schedule_prune(void)
{
	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 *  Here comes the stuff asynchronous to auditctl operations
 */

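/*
 * Called via the freeing_mark hook when the watched inode goes away:
 * detach the chunk from the hash and from every owning tree, then either
 * queue the orphaned trees for the prune thread or hand them to the
 * current syscall's killed-trees list.
 */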
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	if (need_prune)
		audit_schedule_prune();
	mutex_unlock(&audit_filter_mutex);
}

static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   struct fsnotify_event *event)
{
	BUG();
	return -EOPNOTSUPP;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);
	fsnotify_put_mark(entry);
}

static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
				  struct fsnotify_mark *inode_mark,
				  struct fsnotify_mark *vfsmount_mark,
				  __u32 mask, void *data, int data_type)
{
	return false;
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.should_send_event = audit_tree_send_event,
	.free_group_priv = NULL,
	.free_event_priv = NULL,
	.freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);