// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/namei.h>
#include "internal.h"

/*
 * Mark the backing file as being a cache file if it's not already in use.  The
 * mark tells the culling request command that it's not allowed to cull the
 * file or directory.  The caller must hold the inode lock.
 */
static bool __cachefiles_mark_inode_in_use(struct cachefiles_object *object,
					   struct inode *inode)
{
	bool can_use = false;

	if (!(inode->i_flags & S_KERNEL_FILE)) {
		inode->i_flags |= S_KERNEL_FILE;
		trace_cachefiles_mark_active(object, inode);
		can_use = true;
	} else {
		trace_cachefiles_mark_failed(object, inode);
	}

	return can_use;
}

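/*
 * Mark an inode as being in use, taking the inode lock around the attempt.
 */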
static bool cachefiles_mark_inode_in_use(struct cachefiles_object *object,
					 struct inode *inode)
{
	bool can_use;

	inode_lock(inode);
	can_use = __cachefiles_mark_inode_in_use(object, inode);
	inode_unlock(inode);
	return can_use;
}

/*
 * Unmark a backing inode.  The caller must hold the inode lock.
 */
static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
					     struct inode *inode)
{
	inode->i_flags &= ~S_KERNEL_FILE;
	trace_cachefiles_mark_inactive(object, inode);
}

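/*
 * Unmark a backing inode, taking the inode lock to clear the mark.
 */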
static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
					      struct inode *inode)
{
	inode_lock(inode);
	__cachefiles_unmark_inode_in_use(object, inode);
	inode_unlock(inode);
}

/*
 * Unmark a backing inode and tell cachefilesd that there's something that can
 * be culled.
 */
void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
				    struct file *file)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct inode *inode = file_inode(file);

	cachefiles_do_unmark_inode_in_use(object, inode);

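	/* Update the release stats and poke cachefilesd, but only if the file
	 * was actually committed to the cache rather than being a discarded
	 * tmpfile.
	 */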
	if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
		atomic_long_add(inode->i_blocks, &cache->b_released);
		if (atomic_inc_return(&cache->f_released))
			cachefiles_state_changed(cache);
	}
}

/*
 * Get a subdirectory of the given directory, creating it if it doesn't exist.
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname,
					bool *_is_new)
{
	struct dentry *subdir;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

retry:
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		subdir = lookup_one_len(dirname, dir, strlen(dirname));
	else
		subdir = ERR_PTR(ret);
	trace_cachefiles_lookup(NULL, dir, subdir);
	if (IS_ERR(subdir)) {
		trace_cachefiles_vfs_error(NULL, d_backing_inode(dir),
					   PTR_ERR(subdir),
					   cachefiles_trace_lookup_error);
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %pd %s",
	       subdir, d_backing_inode(subdir) ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (d_is_negative(subdir)) {
		ret = cachefiles_has_space(cache, 1, 0,
					   cachefiles_has_space_for_create);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			ret = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700);
		if (ret < 0) {
			trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
						   cachefiles_trace_mkdir_error);
			goto mkdir_error;
		}
		trace_cachefiles_mkdir(dir, subdir);

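		/* The dentry may have been unhashed across the mkdir; if so,
		 * drop it and repeat the lookup to pick up the live dentry.
		 */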
		if (unlikely(d_unhashed(subdir))) {
			cachefiles_put_directory(subdir);
			goto retry;
		}
		ASSERT(d_backing_inode(subdir));

		_debug("mkdir -> %pd{ino=%lu}",
		       subdir, d_backing_inode(subdir)->i_ino);
		if (_is_new)
			*_is_new = true;
	}

	/* Tell rmdir() it's not allowed to delete the subdir */
	inode_lock(d_inode(subdir));
	inode_unlock(d_inode(dir));

	if (!__cachefiles_mark_inode_in_use(NULL, d_inode(subdir))) {
		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
			  subdir, d_inode(subdir)->i_ino);
		goto mark_error;
	}

	inode_unlock(d_inode(subdir));

	/* we need to make sure the subdir is a directory */
	ASSERT(d_backing_inode(subdir));

	if (!d_can_lookup(subdir)) {
		pr_err("%s is not a directory\n", dirname);
		ret = -EIO;
		goto check_error;
	}

	ret = -EPERM;
	if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
	    !d_backing_inode(subdir)->i_op->lookup ||
	    !d_backing_inode(subdir)->i_op->mkdir ||
	    !d_backing_inode(subdir)->i_op->rename ||
	    !d_backing_inode(subdir)->i_op->rmdir ||
	    !d_backing_inode(subdir)->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
	return subdir;

check_error:
	cachefiles_put_directory(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mark_error:
	inode_unlock(d_inode(subdir));
	dput(subdir);
	return ERR_PTR(-EBUSY);

mkdir_error:
	inode_unlock(d_inode(dir));
	dput(subdir);
	pr_err("mkdir %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(subdir);
	pr_err("Lookup %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	inode_unlock(d_inode(dir));
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}

/*
 * Put a subdirectory.
 */
void cachefiles_put_directory(struct dentry *dir)
{
	if (dir) {
		cachefiles_do_unmark_inode_in_use(NULL, d_inode(dir));
		dput(dir);
	}
}

/*
 * Remove a regular file from the cache.
 */
static int cachefiles_unlink(struct cachefiles_cache *cache,
			     struct cachefiles_object *object,
			     struct dentry *dir, struct dentry *dentry,
			     enum fscache_why_object_killed why)
{
	struct path path = {
		.mnt	= cache->mnt,
		.dentry	= dir,
	};
	int ret;

	trace_cachefiles_unlink(object, d_inode(dentry)->i_ino, why);
	ret = security_path_unlink(&path, dentry);
	if (ret < 0) {
		cachefiles_io_error(cache, "Unlink security error");
		return ret;
	}

	ret = cachefiles_inject_remove_error();
	if (ret == 0) {
		ret = vfs_unlink(&nop_mnt_idmap, d_backing_inode(dir), dentry, NULL);
		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");
	}
	if (ret != 0)
		trace_cachefiles_vfs_error(object, d_backing_inode(dir), ret,
					   cachefiles_trace_unlink_error);
	return ret;
}

/*
 * Delete an object representation from the cache
 * - File backed objects are unlinked
 * - Directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 */
int cachefiles_bury_object(struct cachefiles_cache *cache,
			   struct cachefiles_object *object,
			   struct dentry *dir,
			   struct dentry *rep,
			   enum fscache_why_object_killed why)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	if (rep->d_parent != dir) {
		inode_unlock(d_inode(dir));
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		dget(rep); /* Stop the dentry being negated if it's only pinned
			    * by a file struct.
			    */
		ret = cachefiles_unlink(cache, object, dir, rep, why);
		dput(rep);

		inode_unlock(d_inode(dir));
		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	inode_unlock(d_inode(dir));

try_again:
	/* first step is to make up a grave dentry in the graveyard */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) ktime_get_real_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* Lock both directories for rename; lock_rename() hands back a "trap"
	 * dentry that neither the victim nor the grave is allowed to be.
	 */
	trap = lock_rename(cache->graveyard, dir);
	if (IS_ERR(trap))
		return PTR_ERR(trap);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!d_can_lookup(cache->graveyard)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);
		trace_cachefiles_vfs_error(object, d_inode(cache->graveyard),
					   PTR_ERR(grave),
					   cachefiles_trace_lookup_error);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave));
		return -EIO;
	}

	if (d_is_positive(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		struct renamedata rd = {
			.old_mnt_idmap	= &nop_mnt_idmap,
			.old_dir	= d_inode(dir),
			.old_dentry	= rep,
			.new_mnt_idmap	= &nop_mnt_idmap,
			.new_dir	= d_inode(cache->graveyard),
			.new_dentry	= grave,
		};
		trace_cachefiles_rename(object, d_inode(rep)->i_ino, why);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			ret = vfs_rename(&rd);
		if (ret != 0)
			trace_cachefiles_vfs_error(object, d_inode(dir), ret,
						   cachefiles_trace_rename_error);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);
	}

	__cachefiles_unmark_inode_in_use(object, d_inode(rep));
	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}

/*
 * Delete a cache file.
 */
int cachefiles_delete_object(struct cachefiles_object *object,
			     enum fscache_why_object_killed why)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry = object->file->f_path.dentry;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter(",OBJ%x{%pD}", object->debug_id, object->file);

	/* Stop the dentry being negated if it's only pinned by a file struct. */
	dget(dentry);

	inode_lock_nested(d_backing_inode(fan), I_MUTEX_PARENT);
	ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
	inode_unlock(d_backing_inode(fan));
	dput(dentry);
	return ret;
}

/*
 * Create a temporary file and leave it unattached and un-xattr'd until the
 * time comes to discard the object from memory.
 */
struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct cachefiles_cache *cache = volume->cache;
	const struct cred *saved_cred;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	struct file *file;
	const struct path parentpath = { .mnt = cache->mnt, .dentry = fan };
	uint64_t ni_size;
	long ret;

	cachefiles_begin_secure(cache, &saved_cred);

	ret = cachefiles_inject_write_error();
	if (ret == 0) {
		file = kernel_tmpfile_open(&nop_mnt_idmap, &parentpath,
					   S_IFREG | 0600,
					   O_RDWR | O_LARGEFILE | O_DIRECT,
					   cache->cache_cred);
		ret = PTR_ERR_OR_ZERO(file);
	}
	if (ret) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_tmpfile_error);
		if (ret == -EIO)
			cachefiles_io_error_obj(object, "Failed to create tmpfile");
		goto err;
	}

	trace_cachefiles_tmpfile(object, file_inode(file));

	/* This is a newly created file with no other possible user */
	if (!cachefiles_mark_inode_in_use(object, file_inode(file)))
		WARN_ON(1);

	ret = cachefiles_ondemand_init_object(object);
	if (ret < 0)
		goto err_unuse;

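	/* Round the expected object size up to the cache's DIO block size so
	 * that the expanded tmpfile covers whole blocks.
	 */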
	ni_size = object->cookie->object_size;
	ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);

	if (ni_size > 0) {
		trace_cachefiles_trunc(object, file_inode(file), 0, ni_size,
				       cachefiles_trunc_expand_tmpfile);
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			ret = vfs_truncate(&file->f_path, ni_size);
		if (ret < 0) {
			trace_cachefiles_vfs_error(
				object, file_inode(file), ret,
				cachefiles_trace_trunc_error);
			goto err_unuse;
		}
	}

	ret = -EINVAL;
	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto err_unuse;
	}
out:
	cachefiles_end_secure(cache, saved_cred);
	return file;

err_unuse:
	cachefiles_do_unmark_inode_in_use(object, file_inode(file));
	fput(file);
err:
	file = ERR_PTR(ret);
	goto out;
}

/*
 * Create a new file.
 */
static bool cachefiles_create_file(struct cachefiles_object *object)
{
	struct file *file;
	int ret;

	ret = cachefiles_has_space(object->volume->cache, 1, 0,
				   cachefiles_has_space_for_create);
	if (ret < 0)
		return false;

	file = cachefiles_create_tmpfile(object);
	if (IS_ERR(file))
		return false;

	set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
	set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
	_debug("create -> %pD{ino=%lu}", file, file_inode(file)->i_ino);
	object->file = file;
	return true;
}

/*
 * Open an existing file, checking its attributes and replacing it if it is
 * stale.
 */
static bool cachefiles_open_file(struct cachefiles_object *object,
				 struct dentry *dentry)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file;
	struct path path;
	int ret;

	_enter("%pd", dentry);

	if (!cachefiles_mark_inode_in_use(object, d_inode(dentry))) {
		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
			  dentry, d_inode(dentry)->i_ino);
		return false;
	}

	/* We need to open a file interface onto a data file now as we can't do
	 * it on demand because writeback called from do_exit() sees
	 * current->fs == NULL - which breaks d_path() called from ext4 open.
	 */
	path.mnt = cache->mnt;
	path.dentry = dentry;
	file = kernel_file_open(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
				d_backing_inode(dentry), cache->cache_cred);
	if (IS_ERR(file)) {
		trace_cachefiles_vfs_error(object, d_backing_inode(dentry),
					   PTR_ERR(file),
					   cachefiles_trace_open_error);
		goto error;
	}

	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto error_fput;
	}
	_debug("file -> %pd positive", dentry);

	ret = cachefiles_ondemand_init_object(object);
	if (ret < 0)
		goto error_fput;

	ret = cachefiles_check_auxdata(object, file);
	if (ret < 0)
		goto check_failed;

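	/* The coherency data checked out, so the file may hold usable data. */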
	clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &object->cookie->flags);

	object->file = file;

	/* Always update the atime on an object we've just looked up (this is
	 * used to keep track of culling, and atimes are only updated by read,
	 * write and readdir but not lookup or open).
	 */
	touch_atime(&file->f_path);
	dput(dentry);
	return true;

check_failed:
	fscache_cookie_lookup_negative(object->cookie);
	cachefiles_unmark_inode_in_use(object, file);
	fput(file);
	dput(dentry);
	if (ret == -ESTALE)
		return cachefiles_create_file(object);
	return false;

error_fput:
	fput(file);
error:
	cachefiles_do_unmark_inode_in_use(object, d_inode(dentry));
	dput(dentry);
	return false;
}

/*
 * Look up the backing file for an object in its volume's fanout directory,
 * opening it if it already exists or creating a fresh tmpfile if it doesn't
 * (or if what's there turns out to be unusable).
 */
bool cachefiles_look_up_object(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter("OBJ%x,%s,", object->debug_id, object->d_name);

	/* Look up path "cache/vol/fanout/file". */
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_positive_unlocked(object->d_name, fan,
						  object->d_name_len);
	else
		dentry = ERR_PTR(ret);
	trace_cachefiles_lookup(object, fan, dentry);
	if (IS_ERR(dentry)) {
		if (dentry == ERR_PTR(-ENOENT))
			goto new_file;
		if (dentry == ERR_PTR(-EIO))
			cachefiles_io_error_obj(object, "Lookup failed");
		return false;
	}

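	/* Anything other than a regular file is unusable as a backing file;
	 * shove it in the graveyard and start afresh with a new file.
	 */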
	if (!d_is_reg(dentry)) {
		pr_err("%pd is not a file\n", dentry);
		inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
		ret = cachefiles_bury_object(volume->cache, object, fan, dentry,
					     FSCACHE_OBJECT_IS_WEIRD);
		dput(dentry);
		if (ret < 0)
			return false;
		goto new_file;
	}

	if (!cachefiles_open_file(object, dentry))
		return false;

	_leave(" = t [%lu]", file_inode(object->file)->i_ino);
	return true;

new_file:
	fscache_cookie_lookup_negative(object->cookie);
	return cachefiles_create_file(object);
}

/*
 * Attempt to link a temporary file into its rightful place in the cache.
 */
bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
			       struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	bool success = false;
	int ret;

	_enter(",%pD", object->file);

	inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
	else
		dentry = ERR_PTR(ret);
	if (IS_ERR(dentry)) {
		trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
					   cachefiles_trace_lookup_error);
		_debug("lookup fail %ld", PTR_ERR(dentry));
		goto out_unlock;
	}

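	/* There's already something at the target name.  If it's our own
	 * backing file, the commit is effectively already done; otherwise
	 * unlink the stale entry and look the name up again.
	 */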
	if (!d_is_negative(dentry)) {
		if (d_backing_inode(dentry) == file_inode(object->file)) {
			success = true;
			goto out_dput;
		}

		ret = cachefiles_unlink(volume->cache, object, fan, dentry,
					FSCACHE_OBJECT_IS_STALE);
		if (ret < 0)
			goto out_dput;

		dput(dentry);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
		else
			dentry = ERR_PTR(ret);
		if (IS_ERR(dentry)) {
			trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
						   cachefiles_trace_lookup_error);
			_debug("lookup fail %ld", PTR_ERR(dentry));
			goto out_unlock;
		}
	}

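	/* Link the tmpfile into the cache under its lookup name. */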
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		ret = vfs_link(object->file->f_path.dentry, &nop_mnt_idmap,
			       d_inode(fan), dentry, NULL);
	if (ret < 0) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_link_error);
		_debug("link fail %d", ret);
	} else {
		trace_cachefiles_link(object, file_inode(object->file));
		spin_lock(&object->lock);
		/* TODO: Do we want to switch the file pointer to the new dentry? */
		clear_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
		spin_unlock(&object->lock);
		success = true;
	}

out_dput:
	dput(dentry);
out_unlock:
	inode_unlock(d_inode(fan));
	_leave(" = %u", success);
	return success;
}

/*
 * Look up an inode to be checked or culled.  Return -EBUSY if the inode is
 * marked in use.  On success, the parent directory lock is left held for the
 * caller to drop.
 */
static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
						 struct dentry *dir,
						 char *filename)
{
	struct dentry *victim;
	int ret = -ENOENT;

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	victim = lookup_one_len(filename, dir, strlen(filename));
	if (IS_ERR(victim))
		goto lookup_error;
	if (d_is_negative(victim))
		goto lookup_put;
	if (d_inode(victim)->i_flags & S_KERNEL_FILE)
		goto lookup_busy;
	return victim;

lookup_busy:
	ret = -EBUSY;
lookup_put:
	inode_unlock(d_inode(dir));
	dput(victim);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(victim);
	if (ret == -ENOENT)
		return ERR_PTR(-ESTALE); /* Probably got retired by the netfs */

	if (ret == -EIO) {
		cachefiles_io_error(cache, "Lookup failed");
	} else if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	return ERR_PTR(ret);
}

789
790/*
791 * Cull an object if it's not in use
792 * - called only by cache manager daemon
793 */
794int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
795		    char *filename)
796{
797	struct dentry *victim;
798	struct inode *inode;
799	int ret;
800
801	_enter(",%pd/,%s", dir, filename);
802
803	victim = cachefiles_lookup_for_cull(cache, dir, filename);
804	if (IS_ERR(victim))
805		return PTR_ERR(victim);
806
807	/* check to see if someone is using this object */
808	inode = d_inode(victim);
809	inode_lock(inode);
810	if (inode->i_flags & S_KERNEL_FILE) {
811		ret = -EBUSY;
812	} else {
813		/* Stop the cache from picking it back up */
814		inode->i_flags |= S_KERNEL_FILE;
815		ret = 0;
816	}
817	inode_unlock(inode);
818	if (ret < 0)
819		goto error_unlock;
820
821	ret = cachefiles_bury_object(cache, NULL, dir, victim,
822				     FSCACHE_OBJECT_WAS_CULLED);
823	if (ret < 0)
824		goto error;
825
826	fscache_count_culled();
827	dput(victim);
828	_leave(" = 0");
829	return 0;
830
831error_unlock:
832	inode_unlock(d_inode(dir));
833error:
834	dput(victim);
835	if (ret == -ENOENT)
836		return -ESTALE; /* Probably got retired by the netfs */
837
838	if (ret != -ENOMEM) {
839		pr_err("Internal error: %d\n", ret);
840		ret = -EIO;
841	}
842
843	_leave(" = %d", ret);
844	return ret;
845}
846
847/*
848 * Find out if an object is in use or not
849 * - called only by cache manager daemon
850 * - returns -EBUSY or 0 to indicate whether an object is in use or not
851 */
852int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
853			    char *filename)
854{
855	struct dentry *victim;
856	int ret = 0;
857
858	victim = cachefiles_lookup_for_cull(cache, dir, filename);
859	if (IS_ERR(victim))
860		return PTR_ERR(victim);
861
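	/* The lookup didn't find the in-use mark, so the object isn't busy;
	 * just drop the lock taken by the lookup and release the dentry.
	 */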
	inode_unlock(d_inode(dir));
	dput(victim);
	return ret;
}