// SPDX-License-Identifier: GPL-2.0-only
/*
 * file.c
 *
 * PURPOSE
 *  File handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *  (C) 1998-1999 Dave Boynton
 *  (C) 1998-2004 Ben Fennema
 *  (C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  10/02/98 dgb  Attempt to integrate into udf.o
 *  10/07/98      Switched to using generic_readpage, etc., like isofs
 *                And it works!
 *  12/06/98 blf  Added udf_file_read. uses generic_file_read for all cases but
 *                ICBTAG_FLAG_AD_IN_ICB.
 *  04/06/99      64 bit file handling on 32 bit systems taken from ext2 file.c
 *  05/12/99      Preliminary file write support
 */

#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/string.h> /* memset */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

#include "udf_i.h"
#include "udf_sb.h"

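/*
 * Write-fault handler for shared writable mappings: allocate blocks for
 * the faulted page via udf_get_block() (not needed for in-ICB files, whose
 * data is embedded in the inode), mark the page dirty and return it locked.
 * The mapping's invalidate lock serializes us against truncate.
 */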
static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct address_space *mapping = inode->i_mapping;
	struct page *page = vmf->page;
	loff_t size;
	unsigned int end;
	vm_fault_t ret = VM_FAULT_LOCKED;
	int err;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	filemap_invalidate_lock_shared(mapping);
	lock_page(page);
	size = i_size_read(inode);
	if (page->mapping != inode->i_mapping || page_offset(page) >= size) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	/* Space is already allocated for in-ICB file */
	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		goto out_dirty;
	if (page->index == size >> PAGE_SHIFT)
		end = size & ~PAGE_MASK;
	else
		end = PAGE_SIZE;
	err = __block_write_begin(page, 0, end, udf_get_block);
	if (err) {
		unlock_page(page);
		ret = vmf_fs_error(err);
		goto out_unlock;
	}

	block_commit_write(page, 0, end);
out_dirty:
	set_page_dirty(page);
	wait_for_stable_page(page);
out_unlock:
	filemap_invalidate_unlock_shared(mapping);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static const struct vm_operations_struct udf_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= udf_page_mkwrite,
};

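/*
 * Buffered write. If the file data is embedded in the ICB and the write
 * would no longer fit within the inode block, convert the file to a
 * normally allocated one first via udf_expand_file_adinicb(). For files
 * that stay in-ICB, i_lenAlloc must track i_size.
 */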
static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t retval;
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct udf_inode_info *iinfo = UDF_I(inode);

	inode_lock(inode);

	retval = generic_write_checks(iocb, from);
	if (retval <= 0)
		goto out;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
	    inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
				 iocb->ki_pos + iov_iter_count(from))) {
		filemap_invalidate_lock(inode->i_mapping);
		retval = udf_expand_file_adinicb(inode);
		filemap_invalidate_unlock(inode->i_mapping);
		if (retval)
			goto out;
	}

	retval = __generic_file_write_iter(iocb, from);
out:
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && retval > 0) {
		down_write(&iinfo->i_data_sem);
		iinfo->i_lenAlloc = inode->i_size;
		up_write(&iinfo->i_data_sem);
	}
	inode_unlock(inode);

	if (retval > 0) {
		mark_inode_dirty(inode);
		retval = generic_write_sync(iocb, retval);
	}

	return retval;
}

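/*
 * UDF-specific ioctls: return the 32-byte volume identifier
 * (UDF_GETVOLIDENT), remap a block through udf_relocate_blocks()
 * (UDF_RELOCATE_BLOCKS, CAP_SYS_ADMIN only), and report the size or
 * contents of the extended attribute area (UDF_GETEASIZE, UDF_GETEABLOCK).
 */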
long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	long old_block, new_block;
	int result;

	if (file_permission(filp, MAY_READ) != 0) {
		udf_debug("no permission to access inode %lu\n", inode->i_ino);
		return -EPERM;
	}

	if (!arg && ((cmd == UDF_GETVOLIDENT) || (cmd == UDF_GETEASIZE) ||
		     (cmd == UDF_RELOCATE_BLOCKS) || (cmd == UDF_GETEABLOCK))) {
		udf_debug("invalid argument to udf_ioctl\n");
		return -EINVAL;
	}

	switch (cmd) {
	case UDF_GETVOLIDENT:
		if (copy_to_user((char __user *)arg,
				 UDF_SB(inode->i_sb)->s_volume_ident, 32))
			return -EFAULT;
		return 0;
	case UDF_RELOCATE_BLOCKS:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(old_block, (long __user *)arg))
			return -EFAULT;
		result = udf_relocate_blocks(inode->i_sb,
						old_block, &new_block);
		if (result == 0)
			result = put_user(new_block, (long __user *)arg);
		return result;
	case UDF_GETEASIZE:
		return put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg);
	case UDF_GETEABLOCK:
		return copy_to_user((char __user *)arg,
				    UDF_I(inode)->i_data,
				    UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0;
	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}

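/*
 * When the last writer closes the file, drop block preallocation and trim
 * the tail extent so that on-disk allocation matches i_size.
 */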
static int udf_release_file(struct inode *inode, struct file *filp)
{
	if (filp->f_mode & FMODE_WRITE &&
	    atomic_read(&inode->i_writecount) == 1) {
		/*
		 * Grab the inode lock to avoid races with writes changing
		 * i_size while we are running.
		 */
		inode_lock(inode);
		down_write(&UDF_I(inode)->i_data_sem);
		udf_discard_prealloc(inode);
		udf_truncate_tail_extent(inode);
		up_write(&UDF_I(inode)->i_data_sem);
		inode_unlock(inode);
	}
	return 0;
}

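/* Install udf_file_vm_ops so that write faults go through udf_page_mkwrite(). */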
static int udf_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &udf_file_vm_ops;

	return 0;
}

const struct file_operations udf_file_operations = {
	.read_iter		= generic_file_read_iter,
	.unlocked_ioctl		= udf_ioctl,
	.open			= generic_file_open,
	.mmap			= udf_file_mmap,
	.write_iter		= udf_file_write_iter,
	.release		= udf_release_file,
	.fsync			= generic_file_fsync,
	.splice_read		= filemap_splice_read,
	.splice_write		= iter_file_splice_write,
	.llseek			= generic_file_llseek,
};

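/*
 * Attribute changes: refuse uid/gid changes that conflict with ownership
 * forced at mount time (UDF_FLAG_UID_SET / UDF_FLAG_GID_SET), let
 * udf_setsize() handle size changes, and keep the on-disk UDF permission
 * bits in sync on chmod via udf_update_extra_perms().
 */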
static int udf_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		       struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int error;

	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_UID) &&
	    UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET) &&
	    !uid_eq(attr->ia_uid, UDF_SB(sb)->s_uid))
		return -EPERM;
	if ((attr->ia_valid & ATTR_GID) &&
	    UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET) &&
	    !gid_eq(attr->ia_gid, UDF_SB(sb)->s_gid))
		return -EPERM;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		error = udf_setsize(inode, attr->ia_size);
		if (error)
			return error;
	}

	if (attr->ia_valid & ATTR_MODE)
		udf_update_extra_perms(inode, attr->ia_mode);

	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations udf_file_inode_operations = {
	.setattr		= udf_setattr,
};