// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2011 Novell Inc.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/splice.h>
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/sched/signal.h>
#include <linux/cred.h>
#include <linux/namei.h>
#include <linux/fdtable.h>
#include <linux/ratelimit.h>
#include <linux/exportfs.h>
#include "overlayfs.h"

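/* Copy up file data in chunks of this size so the copy loop stays killable */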
#define OVL_COPY_UP_CHUNK_SIZE (1 << 20)

static int ovl_ccup_set(const char *buf, const struct kernel_param *param)
{
	pr_warn("\"check_copy_up\" module option is obsolete\n");
	return 0;
}

static int ovl_ccup_get(char *buf, const struct kernel_param *param)
{
	return sprintf(buf, "N\n");
}

module_param_call(check_copy_up, ovl_ccup_set, ovl_ccup_get, NULL, 0644);
MODULE_PARM_DESC(check_copy_up, "Obsolete; does nothing");

static bool ovl_must_copy_xattr(const char *name)
{
	return !strcmp(name, XATTR_POSIX_ACL_ACCESS) ||
	       !strcmp(name, XATTR_POSIX_ACL_DEFAULT) ||
	       !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN);
}

static int ovl_copy_acl(struct ovl_fs *ofs, const struct path *path,
			struct dentry *dentry, const char *acl_name)
{
	int err;
	struct posix_acl *clone, *real_acl = NULL;

	real_acl = ovl_get_acl_path(path, acl_name, false);
	if (!real_acl)
		return 0;

	if (IS_ERR(real_acl)) {
		err = PTR_ERR(real_acl);
		if (err == -ENODATA || err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	clone = posix_acl_clone(real_acl, GFP_KERNEL);
	posix_acl_release(real_acl); /* release original acl */
	if (!clone)
		return -ENOMEM;

	err = ovl_do_set_acl(ofs, dentry, acl_name, clone);

	/* release cloned acl */
	posix_acl_release(clone);
	return err;
}

int ovl_copy_xattr(struct super_block *sb, const struct path *oldpath, struct dentry *new)
{
	struct dentry *old = oldpath->dentry;
	ssize_t list_size, size, value_size = 0;
	char *buf, *name, *value = NULL;
	int error = 0;
	size_t slen;

	if (!old->d_inode->i_op->listxattr || !new->d_inode->i_op->listxattr)
		return 0;

	list_size = vfs_listxattr(old, NULL, 0);
	if (list_size <= 0) {
		if (list_size == -EOPNOTSUPP)
			return 0;
		return list_size;
	}

	buf = kvzalloc(list_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	list_size = vfs_listxattr(old, buf, list_size);
	if (list_size <= 0) {
		error = list_size;
		goto out;
	}

	for (name = buf; list_size; name += slen) {
		slen = strnlen(name, list_size) + 1;

		/* underlying fs providing us with a broken xattr list? */
		if (WARN_ON(slen > list_size)) {
			error = -EIO;
			break;
		}
		list_size -= slen;

		if (ovl_is_private_xattr(sb, name))
			continue;

		error = security_inode_copy_up_xattr(name);
		if (error < 0 && error != -EOPNOTSUPP)
			break;
		if (error == 1) {
			error = 0;
			continue; /* Discard */
		}

		if (is_posix_acl_xattr(name)) {
			error = ovl_copy_acl(OVL_FS(sb), oldpath, new, name);
			if (!error)
				continue;
			/* POSIX ACLs must be copied. */
			break;
		}

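		/*
		 * Read the value into a reusable buffer that only grows:
		 * on -ERANGE probe the required size, reallocate and retry.
		 */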
retry:
		size = ovl_do_getxattr(oldpath, name, value, value_size);
		if (size == -ERANGE)
			size = ovl_do_getxattr(oldpath, name, NULL, 0);

		if (size < 0) {
			error = size;
			break;
		}

		if (size > value_size) {
			void *new;

			new = kvmalloc(size, GFP_KERNEL);
			if (!new) {
				error = -ENOMEM;
				break;
			}
			kvfree(value);
			value = new;
			value_size = size;
			goto retry;
		}

		error = ovl_do_setxattr(OVL_FS(sb), new, name, value, size, 0);
		if (error) {
			if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name))
				break;

			/* Ignore failure to copy unknown xattrs */
			error = 0;
		}
	}
	kvfree(value);
out:
	kvfree(buf);
	return error;
}

static int ovl_copy_fileattr(struct inode *inode, const struct path *old,
			     const struct path *new)
{
	struct fileattr oldfa = { .flags_valid = true };
	struct fileattr newfa = { .flags_valid = true };
	int err;

	err = ovl_real_fileattr_get(old, &oldfa);
	if (err) {
		/* Ntfs-3g returns -EINVAL for "no fileattr support" */
		if (err == -ENOTTY || err == -EINVAL)
			return 0;
		pr_warn("failed to retrieve lower fileattr (%pd2, err=%i)\n",
			old->dentry, err);
		return err;
	}

	/*
	 * We cannot set immutable and append-only flags on upper inode,
	 * because we would not be able to link upper inode to upper dir
	 * nor set overlay private xattr on upper inode.
	 * Store these flags in overlay.protattr xattr instead.
	 */
	if (oldfa.flags & OVL_PROT_FS_FLAGS_MASK) {
		err = ovl_set_protattr(inode, new->dentry, &oldfa);
		if (err == -EPERM)
			pr_warn_once("copying fileattr: no xattr on upper\n");
		else if (err)
			return err;
	}

	/* Don't bother copying flags if none are set */
	if (!(oldfa.flags & OVL_COPY_FS_FLAGS_MASK))
		return 0;

	err = ovl_real_fileattr_get(new, &newfa);
	if (err) {
		/*
		 * Returning an error if upper doesn't support fileattr will
		 * result in a regression, so revert to the old behavior.
		 */
		if (err == -ENOTTY || err == -EINVAL) {
			pr_warn_once("copying fileattr: no support on upper\n");
			return 0;
		}
		pr_warn("failed to retrieve upper fileattr (%pd2, err=%i)\n",
			new->dentry, err);
		return err;
	}

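	/*
	 * Merge only the copy-able flag bits from the lower fileattr into the
	 * upper one, leaving all other upper flags untouched.
	 */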
	BUILD_BUG_ON(OVL_COPY_FS_FLAGS_MASK & ~FS_COMMON_FL);
	newfa.flags &= ~OVL_COPY_FS_FLAGS_MASK;
	newfa.flags |= (oldfa.flags & OVL_COPY_FS_FLAGS_MASK);

	BUILD_BUG_ON(OVL_COPY_FSX_FLAGS_MASK & ~FS_XFLAG_COMMON);
	newfa.fsx_xflags &= ~OVL_COPY_FSX_FLAGS_MASK;
	newfa.fsx_xflags |= (oldfa.fsx_xflags & OVL_COPY_FSX_FLAGS_MASK);

	return ovl_real_fileattr_set(new, &newfa);
}

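/*
 * Sanity check the copy loop invariants before each splice: source and
 * destination offsets must match and be non-negative, and pos + len must not
 * overflow loff_t.
 */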
static int ovl_verify_area(loff_t pos, loff_t pos2, loff_t len, loff_t totlen)
{
	loff_t tmp;

	if (pos != pos2)
		return -EIO;
	if (pos < 0 || len < 0 || totlen < 0)
		return -EIO;
	if (check_add_overflow(pos, len, &tmp))
		return -EIO;
	return 0;
}

static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
			    struct file *new_file, loff_t len)
{
	struct path datapath;
	struct file *old_file;
	loff_t old_pos = 0;
	loff_t new_pos = 0;
	loff_t cloned;
	loff_t data_pos = -1;
	loff_t hole_len;
	bool skip_hole = false;
	int error = 0;

	ovl_path_lowerdata(dentry, &datapath);
	if (WARN_ON_ONCE(datapath.dentry == NULL) ||
	    WARN_ON_ONCE(len < 0))
		return -EIO;

	old_file = ovl_path_open(&datapath, O_LARGEFILE | O_RDONLY);
	if (IS_ERR(old_file))
		return PTR_ERR(old_file);

	/* Try to use clone_file_range to clone up within the same fs */
	cloned = vfs_clone_file_range(old_file, 0, new_file, 0, len, 0);
	if (cloned == len)
		goto out_fput;

	/* Couldn't clone, so now we try to copy the data */
	error = rw_verify_area(READ, old_file, &old_pos, len);
	if (!error)
		error = rw_verify_area(WRITE, new_file, &new_pos, len);
	if (error)
		goto out_fput;

	/* Check if lower fs supports seek operation */
	if (old_file->f_mode & FMODE_LSEEK)
		skip_hole = true;

	while (len) {
		size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
		ssize_t bytes;

		if (len < this_len)
			this_len = len;

		if (signal_pending_state(TASK_KILLABLE, current)) {
			error = -EINTR;
			break;
		}

		/*
		 * Filling holes with zeros wastes disk space and slows down
		 * the copy-up, so skip holes while copying.  This relies on
		 * SEEK_DATA support in the lower fs; if the lower fs does not
		 * support it, copy-up behaves as before.
		 *
		 * Hole detection works as follows: if the next data position
		 * is beyond the current position, skip the hole; otherwise
		 * copy a chunk of OVL_COPY_UP_CHUNK_SIZE.  This may not
		 * recognize every kind of hole and sometimes skips only part
		 * of one, but it is good enough for most use cases.
		 *
		 * We do not hold upper sb_writers throughout the loop to avert
		 * lockdep warning with llseek of lower file in nested overlay:
		 * - upper sb_writers
		 * -- lower ovl_inode_lock (ovl_llseek)
		 */
		if (skip_hole && data_pos < old_pos) {
			data_pos = vfs_llseek(old_file, old_pos, SEEK_DATA);
			if (data_pos > old_pos) {
				hole_len = data_pos - old_pos;
				len -= hole_len;
				old_pos = new_pos = data_pos;
				continue;
			} else if (data_pos == -ENXIO) {
				break;
			} else if (data_pos < 0) {
				skip_hole = false;
			}
		}

		error = ovl_verify_area(old_pos, new_pos, this_len, len);
		if (error)
			break;

		bytes = do_splice_direct(old_file, &old_pos,
					 new_file, &new_pos,
					 this_len, SPLICE_F_MOVE);
		if (bytes <= 0) {
			error = bytes;
			break;
		}
		WARN_ON(old_pos != new_pos);

		len -= bytes;
	}
	if (!error && ovl_should_sync(ofs))
		error = vfs_fsync(new_file, 0);
out_fput:
	fput(old_file);
	return error;
}

static int ovl_set_size(struct ovl_fs *ofs,
			struct dentry *upperdentry, struct kstat *stat)
{
	struct iattr attr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = stat->size,
	};

	return ovl_do_notify_change(ofs, upperdentry, &attr);
}

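/*
 * Set atime/mtime on the upper dentry from @stat.  ATTR_ATIME_SET and
 * ATTR_MTIME_SET make notify_change() use the supplied timestamps instead of
 * the current time.
 */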
static int ovl_set_timestamps(struct ovl_fs *ofs, struct dentry *upperdentry,
			      struct kstat *stat)
{
	struct iattr attr = {
		.ia_valid =
		     ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_CTIME,
		.ia_atime = stat->atime,
		.ia_mtime = stat->mtime,
	};

	return ovl_do_notify_change(ofs, upperdentry, &attr);
}

int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upperdentry,
		 struct kstat *stat)
{
	int err = 0;

	if (!S_ISLNK(stat->mode)) {
		struct iattr attr = {
			.ia_valid = ATTR_MODE,
			.ia_mode = stat->mode,
		};
		err = ovl_do_notify_change(ofs, upperdentry, &attr);
	}
	if (!err) {
		struct iattr attr = {
			.ia_valid = ATTR_UID | ATTR_GID,
			.ia_vfsuid = VFSUIDT_INIT(stat->uid),
			.ia_vfsgid = VFSGIDT_INIT(stat->gid),
		};
		err = ovl_do_notify_change(ofs, upperdentry, &attr);
	}
	if (!err)
		err = ovl_set_timestamps(ofs, upperdentry, stat);

	return err;
}

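/*
 * Encode a file handle for the real (upper or lower) inode and wrap it in the
 * overlay fh header (version, magic, type, flags, len and optional uuid), so
 * it can be stored in an overlay xattr.
 */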
struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
				  bool is_upper)
{
	struct ovl_fh *fh;
	int fh_type, dwords;
	int buflen = MAX_HANDLE_SZ;
	uuid_t *uuid = &real->d_sb->s_uuid;
	int err;

	/* Make sure the real fid stays 32bit aligned */
	BUILD_BUG_ON(OVL_FH_FID_OFFSET % 4);
	BUILD_BUG_ON(MAX_HANDLE_SZ + OVL_FH_FID_OFFSET > 255);

	fh = kzalloc(buflen + OVL_FH_FID_OFFSET, GFP_KERNEL);
	if (!fh)
		return ERR_PTR(-ENOMEM);

	/*
	 * We encode a non-connectable file handle for non-dir, because we
	 * only need to find the lower inode number and we don't want to pay
	 * the price of reconnecting the dentry.
	 */
	dwords = buflen >> 2;
	fh_type = exportfs_encode_fh(real, (void *)fh->fb.fid, &dwords, 0);
	buflen = (dwords << 2);

	err = -EIO;
	if (WARN_ON(fh_type < 0) ||
	    WARN_ON(buflen > MAX_HANDLE_SZ) ||
	    WARN_ON(fh_type == FILEID_INVALID))
		goto out_err;

	fh->fb.version = OVL_FH_VERSION;
	fh->fb.magic = OVL_FH_MAGIC;
	fh->fb.type = fh_type;
	fh->fb.flags = OVL_FH_FLAG_CPU_ENDIAN;
	/*
	 * When we want to decode an overlay dentry from this handle and all
	 * layers are on the same fs, if we get a disconnected real dentry
	 * when we decode the fid, the only way to tell if we should assign
	 * it to upperdentry or to lowerstack is by checking this flag.
	 */
	if (is_upper)
		fh->fb.flags |= OVL_FH_FLAG_PATH_UPPER;
	fh->fb.len = sizeof(fh->fb) + buflen;
	if (ovl_origin_uuid(ofs))
		fh->fb.uuid = *uuid;

	return fh;

out_err:
	kfree(fh);
	return ERR_PTR(err);
}

struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin)
{
	/*
	 * When lower layer doesn't support export operations store a 'null' fh,
	 * so we can use the overlay.origin xattr to distinguish between a copy
	 * up and a pure upper inode.
	 */
	if (!ovl_can_decode_fh(origin->d_sb))
		return NULL;

	return ovl_encode_real_fh(ofs, origin, false);
}

int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
		      struct dentry *upper)
{
	int err;

	/*
	 * Do not fail when upper doesn't support xattrs.
	 */
	err = ovl_check_setxattr(ofs, upper, OVL_XATTR_ORIGIN,
				 fh ? fh->buf : NULL, fh ? fh->fb.len : 0, 0);

	/* Ignore -EPERM from setting "user.*" on symlink/special */
	return err == -EPERM ? 0 : err;
}

/* Store file handle of @upper dir in @index dir entry */
static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper,
			    struct dentry *index)
{
	const struct ovl_fh *fh;
	int err;

	fh = ovl_encode_real_fh(ofs, upper, true);
	if (IS_ERR(fh))
		return PTR_ERR(fh);

	err = ovl_setxattr(ofs, index, OVL_XATTR_UPPER, fh->buf, fh->fb.len);

	kfree(fh);
	return err;
}

/*
 * Create and install index entry.
 *
 * Caller must hold i_mutex on indexdir.
 */
static int ovl_create_index(struct dentry *dentry, const struct ovl_fh *fh,
			    struct dentry *upper)
{
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	struct dentry *indexdir = ovl_indexdir(dentry->d_sb);
	struct inode *dir = d_inode(indexdir);
	struct dentry *index = NULL;
	struct dentry *temp = NULL;
	struct qstr name = { };
	int err;

	/*
	 * For now this is only used for creating index entry for directories,
	 * because non-dirs are copied up directly to the index and then
	 * hardlinked to the upper dir.
	 *
	 * TODO: implement create index for non-dir, so we can call it when
	 * encoding file handle for non-dir in case index does not exist.
	 */
	if (WARN_ON(!d_is_dir(dentry)))
		return -EIO;

	/* Directory not expected to be indexed before copy up */
	if (WARN_ON(ovl_test_flag(OVL_INDEX, d_inode(dentry))))
		return -EIO;

	err = ovl_get_index_name_fh(fh, &name);
	if (err)
		return err;

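	/*
	 * Create the index entry as a temp dir in indexdir, record the upper
	 * dir's file handle in it, and then rename it into place.
	 */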
	temp = ovl_create_temp(ofs, indexdir, OVL_CATTR(S_IFDIR | 0));
	err = PTR_ERR(temp);
	if (IS_ERR(temp))
		goto free_name;

	err = ovl_set_upper_fh(ofs, upper, temp);
	if (err)
		goto out;

	index = ovl_lookup_upper(ofs, name.name, indexdir, name.len);
	if (IS_ERR(index)) {
		err = PTR_ERR(index);
	} else {
		err = ovl_do_rename(ofs, dir, temp, dir, index, 0);
		dput(index);
	}
out:
	if (err)
		ovl_cleanup(ofs, dir, temp);
	dput(temp);
free_name:
	kfree(name.name);
	return err;
}

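/* State shared by the copy-up helpers below for a single copy-up operation */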
struct ovl_copy_up_ctx {
	struct dentry *parent;
	struct dentry *dentry;
	struct path lowerpath;
	struct kstat stat;
	struct kstat pstat;
	const char *link;
	struct dentry *destdir;
	struct qstr destname;
	struct dentry *workdir;
	const struct ovl_fh *origin_fh;
	bool origin;
	bool indexed;
	bool metacopy;
	bool metacopy_digest;
};

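/*
 * Hardlink the already copied up inode of @c->dentry into its upper parent
 * dir and mark the dentry as having an upper alias.  Used when the inode has
 * an upper copy (e.g. a hardlink copied up via the index) but this dentry
 * does not have an upper alias yet.
 */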
static int ovl_link_up(struct ovl_copy_up_ctx *c)
{
	int err;
	struct dentry *upper;
	struct dentry *upperdir = ovl_dentry_upper(c->parent);
	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
	struct inode *udir = d_inode(upperdir);

	ovl_start_write(c->dentry);

	/* Mark parent "impure" because it may now contain non-pure upper */
	err = ovl_set_impure(c->parent, upperdir);
	if (err)
		goto out;

	err = ovl_set_nlink_lower(c->dentry);
	if (err)
		goto out;

	inode_lock_nested(udir, I_MUTEX_PARENT);
	upper = ovl_lookup_upper(ofs, c->dentry->d_name.name, upperdir,
				 c->dentry->d_name.len);
	err = PTR_ERR(upper);
	if (!IS_ERR(upper)) {
		err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper);
		dput(upper);

		if (!err) {
			/* Restore timestamps on parent (best effort) */
			ovl_set_timestamps(ofs, upperdir, &c->pstat);
			ovl_dentry_set_upper_alias(c->dentry);
			ovl_dentry_update_reval(c->dentry, upper);
		}
	}
	inode_unlock(udir);
	if (err)
		goto out;

	err = ovl_set_nlink_upper(c->dentry);

out:
	ovl_end_write(c->dentry);
	return err;
}

static int ovl_copy_up_data(struct ovl_copy_up_ctx *c, const struct path *temp)
{
	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
	struct file *new_file;
	int err;

	if (!S_ISREG(c->stat.mode) || c->metacopy || !c->stat.size)
		return 0;

	new_file = ovl_path_open(temp, O_LARGEFILE | O_WRONLY);
	if (IS_ERR(new_file))
		return PTR_ERR(new_file);

	err = ovl_copy_up_file(ofs, c->dentry, new_file, c->stat.size);
	fput(new_file);

	return err;
}

static int ovl_copy_up_metadata(struct ovl_copy_up_ctx *c, struct dentry *temp)
{
	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
	struct inode *inode = d_inode(c->dentry);
	struct path upperpath = { .mnt = ovl_upper_mnt(ofs), .dentry = temp };
	int err;

	err = ovl_copy_xattr(c->dentry->d_sb, &c->lowerpath, temp);
	if (err)
		return err;

	if (inode->i_flags & OVL_COPY_I_FLAGS_MASK &&
	    (S_ISREG(c->stat.mode) || S_ISDIR(c->stat.mode))) {
		/*
		 * Copy the fileattr inode flags that are the source of already
		 * copied i_flags
		 */
		err = ovl_copy_fileattr(inode, &c->lowerpath, &upperpath);
		if (err)
			return err;
	}

	/*
	 * Store identifier of lower inode in upper inode xattr to
	 * allow lookup of the copy up origin inode.
	 *
	 * Don't set origin when we are breaking the association with a lower
	 * hard link.
	 */
	if (c->origin) {
		err = ovl_set_origin_fh(ofs, c->origin_fh, temp);
		if (err)
			return err;
	}

	if (c->metacopy) {
		struct path lowerdatapath;
		struct ovl_metacopy metacopy_data = OVL_METACOPY_INIT;

		ovl_path_lowerdata(c->dentry, &lowerdatapath);
		if (WARN_ON_ONCE(lowerdatapath.dentry == NULL))
			return -EIO;
		err = ovl_get_verity_digest(ofs, &lowerdatapath, &metacopy_data);
		if (err)
			return err;

		if (metacopy_data.digest_algo)
			c->metacopy_digest = true;

		err = ovl_set_metacopy_xattr(ofs, temp, &metacopy_data);
		if (err)
			return err;
	}

	inode_lock(temp->d_inode);
	if (S_ISREG(c->stat.mode))
		err = ovl_set_size(ofs, temp, &c->stat);
	if (!err)
		err = ovl_set_attr(ofs, temp, &c->stat);
	inode_unlock(temp->d_inode);

	return err;
}

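/* Credentials to use during copy-up, as prepared by the security module */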
struct ovl_cu_creds {
	const struct cred *old;
	struct cred *new;
};

static int ovl_prep_cu_creds(struct dentry *dentry, struct ovl_cu_creds *cc)
{
	int err;

	cc->old = cc->new = NULL;
	err = security_inode_copy_up(dentry, &cc->new);
	if (err < 0)
		return err;

	if (cc->new)
		cc->old = override_creds(cc->new);

	return 0;
}

static void ovl_revert_cu_creds(struct ovl_cu_creds *cc)
{
	if (cc->new) {
		revert_creds(cc->old);
		put_cred(cc->new);
	}
}

/*
 * Copyup using workdir to prepare temp file.  Used when copying up directories,
 * special files or when upper fs doesn't support O_TMPFILE.
 */
static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
{
	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
	struct inode *inode;
	struct inode *udir = d_inode(c->destdir), *wdir = d_inode(c->workdir);
	struct path path = { .mnt = ovl_upper_mnt(ofs) };
	struct dentry *temp, *upper, *trap;
	struct ovl_cu_creds cc;
	int err;
	struct ovl_cattr cattr = {
		/* Can't properly set mode on creation because of the umask */
		.mode = c->stat.mode & S_IFMT,
		.rdev = c->stat.rdev,
		.link = c->link
	};

	err = ovl_prep_cu_creds(c->dentry, &cc);
	if (err)
		return err;

	ovl_start_write(c->dentry);
	inode_lock(wdir);
	temp = ovl_create_temp(ofs, c->workdir, &cattr);
	inode_unlock(wdir);
	ovl_end_write(c->dentry);
	ovl_revert_cu_creds(&cc);

	if (IS_ERR(temp))
		return PTR_ERR(temp);

	/*
	 * Copy up data first and then xattrs. Writing data after
	 * xattrs will remove security.capability xattr automatically.
	 */
	path.dentry = temp;
	err = ovl_copy_up_data(c, &path);
	/*
	 * We cannot hold lock_rename() throughout this helper, because of
	 * lock ordering with sb_writers, which shouldn't be held when calling
	 * ovl_copy_up_data(), so lock workdir and destdir and make sure that
	 * temp wasn't moved before copy up completion or cleanup.
	 */
	ovl_start_write(c->dentry);
	trap = lock_rename(c->workdir, c->destdir);
	if (trap || temp->d_parent != c->workdir) {
		/* temp or workdir moved underneath us? abort without cleanup */
		dput(temp);
		err = -EIO;
		if (IS_ERR(trap))
			goto out;
		goto unlock;
	} else if (err) {
		goto cleanup;
	}

	err = ovl_copy_up_metadata(c, temp);
	if (err)
		goto cleanup;

	if (S_ISDIR(c->stat.mode) && c->indexed) {
		err = ovl_create_index(c->dentry, c->origin_fh, temp);
		if (err)
			goto cleanup;
	}

	upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir,
				 c->destname.len);
	err = PTR_ERR(upper);
	if (IS_ERR(upper))
		goto cleanup;

	err = ovl_do_rename(ofs, wdir, temp, udir, upper, 0);
	dput(upper);
	if (err)
		goto cleanup;

	inode = d_inode(c->dentry);
	if (c->metacopy_digest)
		ovl_set_flag(OVL_HAS_DIGEST, inode);
	else
		ovl_clear_flag(OVL_HAS_DIGEST, inode);
	ovl_clear_flag(OVL_VERIFIED_DIGEST, inode);

	if (!c->metacopy)
		ovl_set_upperdata(inode);
	ovl_inode_update(inode, temp);
	if (S_ISDIR(inode->i_mode))
		ovl_set_flag(OVL_WHITEOUTS, inode);
unlock:
	unlock_rename(c->workdir, c->destdir);
out:
	ovl_end_write(c->dentry);

	return err;

cleanup:
	ovl_cleanup(ofs, wdir, temp);
	dput(temp);
	goto unlock;
}

/* Copyup using O_TMPFILE which does not require cross dir locking */
static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
{
	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
	struct inode *udir = d_inode(c->destdir);
	struct dentry *temp, *upper;
	struct file *tmpfile;
	struct ovl_cu_creds cc;
	int err;

	err = ovl_prep_cu_creds(c->dentry, &cc);
	if (err)
		return err;

	ovl_start_write(c->dentry);
	tmpfile = ovl_do_tmpfile(ofs, c->workdir, c->stat.mode);
	ovl_end_write(c->dentry);
	ovl_revert_cu_creds(&cc);
	if (IS_ERR(tmpfile))
		return PTR_ERR(tmpfile);

	temp = tmpfile->f_path.dentry;
	if (!c->metacopy && c->stat.size) {
		err = ovl_copy_up_file(ofs, c->dentry, tmpfile, c->stat.size);
		if (err)
			goto out_fput;
	}

	ovl_start_write(c->dentry);

	err = ovl_copy_up_metadata(c, temp);
	if (err)
		goto out;

	inode_lock_nested(udir, I_MUTEX_PARENT);

	upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir,
				 c->destname.len);
	err = PTR_ERR(upper);
	if (!IS_ERR(upper)) {
		err = ovl_do_link(ofs, temp, udir, upper);
		dput(upper);
	}
	inode_unlock(udir);

	if (err)
		goto out;

	if (c->metacopy_digest)
		ovl_set_flag(OVL_HAS_DIGEST, d_inode(c->dentry));
	else
		ovl_clear_flag(OVL_HAS_DIGEST, d_inode(c->dentry));
	ovl_clear_flag(OVL_VERIFIED_DIGEST, d_inode(c->dentry));

	if (!c->metacopy)
		ovl_set_upperdata(d_inode(c->dentry));
	ovl_inode_update(d_inode(c->dentry), dget(temp));

out:
	ovl_end_write(c->dentry);
out_fput:
	fput(tmpfile);
	return err;
}

/*
 * Copy up a single dentry
 *
 * All renames start with copy up of source if necessary.  The actual
 * rename will only proceed once the copy up was successful.  Copy up uses
 * upper parent i_mutex for exclusion.  Since rename can change d_parent it
 * is possible that the copy up will lock the old parent.  At that point
 * the file will have already been copied up anyway.
 */
static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
{
	int err;
	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
	struct dentry *origin = c->lowerpath.dentry;
	struct ovl_fh *fh = NULL;
	bool to_index = false;

	/*
	 * An indexed non-dir is copied up directly to the index entry and
	 * then hardlinked to the upper dir.  An indexed dir is copied up to
	 * indexdir, then the index entry is created and the copied up dir is
	 * installed.  Copying a dir up to indexdir instead of workdir
	 * simplifies locking.
	 */
	if (ovl_need_index(c->dentry)) {
		c->indexed = true;
		if (S_ISDIR(c->stat.mode))
			c->workdir = ovl_indexdir(c->dentry->d_sb);
		else
			to_index = true;
	}

	if (S_ISDIR(c->stat.mode) || c->stat.nlink == 1 || to_index) {
		fh = ovl_get_origin_fh(ofs, origin);
		if (IS_ERR(fh))
			return PTR_ERR(fh);

		/* origin_fh may be NULL */
		c->origin_fh = fh;
		c->origin = true;
	}

	if (to_index) {
		c->destdir = ovl_indexdir(c->dentry->d_sb);
		err = ovl_get_index_name(ofs, origin, &c->destname);
		if (err)
			goto out_free_fh;
	} else if (WARN_ON(!c->parent)) {
		/* Disconnected dentry must be copied up to index dir */
		err = -EIO;
		goto out_free_fh;
	} else {
		/*
		 * c->dentry->d_name is stabilized by ovl_copy_up_start(),
		 * because if we got here, it means that c->dentry has no upper
		 * alias and changing ->d_name means going through ovl_rename()
		 * that will call ovl_copy_up() on source and target dentry.
		 */
		c->destname = c->dentry->d_name;
		/*
		 * Mark parent "impure" because it may now contain non-pure
		 * upper
		 */
		ovl_start_write(c->dentry);
		err = ovl_set_impure(c->parent, c->destdir);
		ovl_end_write(c->dentry);
		if (err)
			goto out_free_fh;
	}

	/* Should we copyup with O_TMPFILE or with workdir? */
	if (S_ISREG(c->stat.mode) && ofs->tmpfile)
		err = ovl_copy_up_tmpfile(c);
	else
		err = ovl_copy_up_workdir(c);
	if (err)
		goto out;

	if (c->indexed)
		ovl_set_flag(OVL_INDEX, d_inode(c->dentry));

	ovl_start_write(c->dentry);
	if (to_index) {
		/* Initialize nlink for copy up of disconnected dentry */
		err = ovl_set_nlink_upper(c->dentry);
	} else {
		struct inode *udir = d_inode(c->destdir);

		/* Restore timestamps on parent (best effort) */
		inode_lock(udir);
		ovl_set_timestamps(ofs, c->destdir, &c->pstat);
		inode_unlock(udir);

		ovl_dentry_set_upper_alias(c->dentry);
		ovl_dentry_update_reval(c->dentry, ovl_dentry_upper(c->dentry));
	}
	ovl_end_write(c->dentry);

out:
	if (to_index)
		kfree(c->destname.name);
out_free_fh:
	kfree(fh);
	return err;
}

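/*
 * Decide whether to do a metadata-only copy up (metacopy): requires the
 * metacopy mount option, a regular file, an open that does not write or
 * truncate, and, if verity is required, a verity-enabled lower data file.
 */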
static bool ovl_need_meta_copy_up(struct dentry *dentry, umode_t mode,
				  int flags)
{
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);

	if (!ofs->config.metacopy)
		return false;

	if (!S_ISREG(mode))
		return false;

	if (flags && ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC)))
		return false;

	/* Fall back to full copy if no fsverity on source data and we require verity */
	if (ofs->config.verity_mode == OVL_VERITY_REQUIRE) {
		struct path lowerdata;

		ovl_path_lowerdata(dentry, &lowerdata);

		if (WARN_ON_ONCE(lowerdata.dentry == NULL) ||
		    ovl_ensure_verity_loaded(&lowerdata) ||
		    !fsverity_active(d_inode(lowerdata.dentry))) {
			return false;
		}
	}

	return true;
}

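/*
 * Read an xattr value into a freshly allocated buffer.  Returns the value
 * size, 0 if the xattr is missing or unsupported, or a negative error.
 */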
static ssize_t ovl_getxattr_value(const struct path *path, char *name, char **value)
{
	ssize_t res;
	char *buf;

	res = ovl_do_getxattr(path, name, NULL, 0);
	if (res == -ENODATA || res == -EOPNOTSUPP)
		res = 0;

	if (res > 0) {
		buf = kzalloc(res, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		res = ovl_do_getxattr(path, name, buf, res);
		if (res < 0)
			kfree(buf);
		else
			*value = buf;
	}
	return res;
}

/* Copy up data of an inode which was copied up metadata only in the past. */
static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
{
	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
	struct path upperpath;
	int err;
	char *capability = NULL;
	ssize_t cap_size;

	ovl_path_upper(c->dentry, &upperpath);
	if (WARN_ON(upperpath.dentry == NULL))
		return -EIO;

	if (c->stat.size) {
		err = cap_size = ovl_getxattr_value(&upperpath, XATTR_NAME_CAPS,
						    &capability);
		if (cap_size < 0)
			goto out;
	}

	err = ovl_copy_up_data(c, &upperpath);
	if (err)
		goto out_free;

	/*
	 * Writing to upper file will clear security.capability xattr. We
	 * don't want that to happen for normal copy-up operation.
	 */
	ovl_start_write(c->dentry);
	if (capability) {
		err = ovl_do_setxattr(ofs, upperpath.dentry, XATTR_NAME_CAPS,
				      capability, cap_size, 0);
	}
	if (!err) {
		err = ovl_removexattr(ofs, upperpath.dentry,
				      OVL_XATTR_METACOPY);
	}
	ovl_end_write(c->dentry);
	if (err)
		goto out_free;

	ovl_clear_flag(OVL_HAS_DIGEST, d_inode(c->dentry));
	ovl_clear_flag(OVL_VERIFIED_DIGEST, d_inode(c->dentry));
	ovl_set_upperdata(d_inode(c->dentry));
out_free:
	kfree(capability);
out:
	return err;
}

static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
			   int flags)
{
	int err;
	DEFINE_DELAYED_CALL(done);
	struct path parentpath;
	struct ovl_copy_up_ctx ctx = {
		.parent = parent,
		.dentry = dentry,
		.workdir = ovl_workdir(dentry),
	};

	if (WARN_ON(!ctx.workdir))
		return -EROFS;

	ovl_path_lower(dentry, &ctx.lowerpath);
	err = vfs_getattr(&ctx.lowerpath, &ctx.stat,
			  STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
	if (err)
		return err;

	if (!kuid_has_mapping(current_user_ns(), ctx.stat.uid) ||
	    !kgid_has_mapping(current_user_ns(), ctx.stat.gid))
		return -EOVERFLOW;

	ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags);

	if (parent) {
		ovl_path_upper(parent, &parentpath);
		ctx.destdir = parentpath.dentry;

		err = vfs_getattr(&parentpath, &ctx.pstat,
				  STATX_ATIME | STATX_MTIME,
				  AT_STATX_SYNC_AS_STAT);
		if (err)
			return err;
	}

	/* maybe truncate regular file. this has no effect on dirs */
	if (flags & O_TRUNC)
		ctx.stat.size = 0;

	if (S_ISLNK(ctx.stat.mode)) {
		ctx.link = vfs_get_link(ctx.lowerpath.dentry, &done);
		if (IS_ERR(ctx.link))
			return PTR_ERR(ctx.link);
	}

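	/*
	 * ovl_copy_up_start() serializes racing copy-ups of this dentry.
	 * Copy up the inode if it has no upper copy yet, hardlink it into
	 * the upper parent if this dentry still lacks an upper alias, and
	 * finally copy up the data of a previously metacopied inode.
	 */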
	err = ovl_copy_up_start(dentry, flags);
	/* err < 0: interrupted, err > 0: raced with another copy-up */
	if (unlikely(err)) {
		if (err > 0)
			err = 0;
	} else {
		if (!ovl_dentry_upper(dentry))
			err = ovl_do_copy_up(&ctx);
		if (!err && parent && !ovl_dentry_has_upper_alias(dentry))
			err = ovl_link_up(&ctx);
		if (!err && ovl_dentry_needs_data_copy_up_locked(dentry, flags))
			err = ovl_copy_up_meta_inode_data(&ctx);
		ovl_copy_up_end(dentry);
	}
	do_delayed_call(&done);

	return err;
}

static int ovl_copy_up_flags(struct dentry *dentry, int flags)
{
	int err = 0;
	const struct cred *old_cred;
	bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED);

	/*
	 * With NFS export, copy up can get called for a disconnected non-dir.
	 * In this case, we will copy up lower inode to index dir without
	 * linking it to upper dir.
	 */
	if (WARN_ON(disconnected && d_is_dir(dentry)))
		return -EIO;

	/*
	 * We may not need lowerdata if we are only doing a metadata copy up,
	 * but optimizing that case is not very important, so do the lazy
	 * lowerdata lookup before any copy up, while ovl_inode_lock() is not
	 * yet held.
	 */
	err = ovl_verify_lowerdata(dentry);
	if (err)
		return err;

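	/*
	 * Copy up ancestors top down: each pass finds the topmost ancestor
	 * without an upper dentry and copies it up, until @dentry itself has
	 * been copied up.
	 */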
	old_cred = ovl_override_creds(dentry->d_sb);
	while (!err) {
		struct dentry *next;
		struct dentry *parent = NULL;

		if (ovl_already_copied_up(dentry, flags))
			break;

		next = dget(dentry);
		/* find the topmost dentry not yet copied up */
		for (; !disconnected;) {
			parent = dget_parent(next);

			if (ovl_dentry_upper(parent))
				break;

			dput(next);
			next = parent;
		}

		err = ovl_copy_up_one(parent, next, flags);

		dput(parent);
		dput(next);
	}
	revert_creds(old_cred);

	return err;
}

static bool ovl_open_need_copy_up(struct dentry *dentry, int flags)
{
	/* Copy up of disconnected dentry does not set upper alias */
	if (ovl_already_copied_up(dentry, flags))
		return false;

	if (special_file(d_inode(dentry)->i_mode))
		return false;

	if (!ovl_open_flags_need_copy_up(flags))
		return false;

	return true;
}

int ovl_maybe_copy_up(struct dentry *dentry, int flags)
{
	if (!ovl_open_need_copy_up(dentry, flags))
		return 0;

	return ovl_copy_up_flags(dentry, flags);
}

int ovl_copy_up_with_data(struct dentry *dentry)
{
	return ovl_copy_up_flags(dentry, O_WRONLY);
}

int ovl_copy_up(struct dentry *dentry)
{
	return ovl_copy_up_flags(dentry, 0);
}