// SPDX-License-Identifier: GPL-2.0
/*
 * File operations for Coda.
 * Original version: (C) 1996 Peter Braam
 * Rewritten for Linux 2.1: (C) 1997 Carnegie Mellon University
 *
 * Carnegie Mellon encourages users of this code to contribute improvements
 * to the Coda project. Contact Peter Braam <coda@cs.cmu.edu>.
 */

#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/splice.h>

#include <linux/coda.h>
#include "coda_psdev.h"
#include "coda_linux.h"
#include "coda_int.h"

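/*
 * Wrapper around the host file's vm_operations.  The open/close methods
 * are redirected so Coda can keep the coda_file pinned for as long as
 * any mapping of the container file exists.
 */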
struct coda_vm_ops {
	refcount_t refcnt;
	struct file *coda_file;
	const struct vm_operations_struct *host_vm_ops;
	struct vm_operations_struct vm_ops;
};

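/*
 * Reads are passed through to the cached container file; Venus is told
 * about the access intent before the read and notified again once the
 * read has finished.
 */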
static ssize_t
coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *coda_file = iocb->ki_filp;
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	loff_t ki_pos = iocb->ki_pos;
	size_t count = iov_iter_count(to);
	ssize_t ret;

	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  count, ki_pos, CODA_ACCESS_TYPE_READ);
	if (ret)
		goto finish_read;

	ret = vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos, 0);

finish_read:
	venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
			    &cfi->cfi_access_intent,
			    count, ki_pos, CODA_ACCESS_TYPE_READ_FINISH);
	return ret;
}

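/*
 * Writes go to the container file.  The size, block count and
 * timestamps of the Coda inode are refreshed from the host inode under
 * the inode lock, bracketed by the same access intent upcalls as reads.
 */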
static ssize_t
coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *coda_file = iocb->ki_filp;
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	struct file *host_file = cfi->cfi_container;
	loff_t ki_pos = iocb->ki_pos;
	size_t count = iov_iter_count(to);
	ssize_t ret;

	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  count, ki_pos, CODA_ACCESS_TYPE_WRITE);
	if (ret)
		goto finish_write;

	inode_lock(coda_inode);
	ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0);
	coda_inode->i_size = file_inode(host_file)->i_size;
	coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
	inode_set_mtime_to_ts(coda_inode, inode_set_ctime_current(coda_inode));
	inode_unlock(coda_inode);

finish_write:
	venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
			    &cfi->cfi_access_intent,
			    count, ki_pos, CODA_ACCESS_TYPE_WRITE_FINISH);
	return ret;
}

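/*
 * splice_read is forwarded to the container file, again bracketed by
 * access intent upcalls to Venus.
 */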
static ssize_t
coda_file_splice_read(struct file *coda_file, loff_t *ppos,
		      struct pipe_inode_info *pipe,
		      size_t len, unsigned int flags)
{
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	struct file *in = cfi->cfi_container;
	loff_t ki_pos = *ppos;
	ssize_t ret;

	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  len, ki_pos, CODA_ACCESS_TYPE_READ);
	if (ret)
		goto finish_read;

	ret = vfs_splice_read(in, ppos, pipe, len, flags);

finish_read:
	venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
			    &cfi->cfi_access_intent,
			    len, ki_pos, CODA_ACCESS_TYPE_READ_FINISH);
	return ret;
}

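/* another user of the mapping; take a reference and chain to the host ->open */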
static void
coda_vm_open(struct vm_area_struct *vma)
{
	struct coda_vm_ops *cvm_ops =
		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);

	refcount_inc(&cvm_ops->refcnt);

	if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open)
		cvm_ops->host_vm_ops->open(vma);
}

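/*
 * Chain to the host's ->close.  When the last reference goes away,
 * restore the host vm_ops, drop the pinned coda_file and free the
 * wrapper.
 */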
static void
coda_vm_close(struct vm_area_struct *vma)
{
	struct coda_vm_ops *cvm_ops =
		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);

	if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close)
		cvm_ops->host_vm_ops->close(vma);

	if (refcount_dec_and_test(&cvm_ops->refcnt)) {
		vma->vm_ops = cvm_ops->host_vm_ops;
		fput(cvm_ops->coda_file);
		kfree(cvm_ops);
	}
}

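/*
 * mmap of a Coda file is backed by the container file: the Coda file
 * and inode are redirected to the host's address space (refused with
 * -EBUSY if the container file changed underneath us), the per-file and
 * per-inode mmap counts are bumped, and the vm_operations are wrapped
 * with coda_vm_ops so the coda_file stays pinned while mappings exist.
 */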
static int
coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
{
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	struct file *host_file = cfi->cfi_container;
	struct inode *host_inode = file_inode(host_file);
	struct coda_inode_info *cii;
	struct coda_vm_ops *cvm_ops;
	loff_t ppos;
	size_t count;
	int ret;

	if (!host_file->f_op->mmap)
		return -ENODEV;

	if (WARN_ON(coda_file != vma->vm_file))
		return -EIO;

	count = vma->vm_end - vma->vm_start;
	ppos = vma->vm_pgoff * PAGE_SIZE;

	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  count, ppos, CODA_ACCESS_TYPE_MMAP);
	if (ret)
		return ret;

	cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL);
	if (!cvm_ops)
		return -ENOMEM;

	cii = ITOC(coda_inode);
	spin_lock(&cii->c_lock);
	coda_file->f_mapping = host_file->f_mapping;
	if (coda_inode->i_mapping == &coda_inode->i_data)
		coda_inode->i_mapping = host_inode->i_mapping;

	/* only allow additional mmaps as long as userspace isn't changing
	 * the container file on us! */
	else if (coda_inode->i_mapping != host_inode->i_mapping) {
		spin_unlock(&cii->c_lock);
		kfree(cvm_ops);
		return -EBUSY;
	}

	/* keep track of how often the coda_inode/host_file has been mmapped */
	cii->c_mapcount++;
	cfi->cfi_mapcount++;
	spin_unlock(&cii->c_lock);

	vma->vm_file = get_file(host_file);
	ret = call_mmap(vma->vm_file, vma);

	if (ret) {
		/* if call_mmap fails, our caller will put host_file so we
		 * should drop the reference to the coda_file that we got.
		 */
		fput(coda_file);
		kfree(cvm_ops);
	} else {
		/* here we add redirects for the open/close vm_operations */
		cvm_ops->host_vm_ops = vma->vm_ops;
		if (vma->vm_ops)
			cvm_ops->vm_ops = *vma->vm_ops;

		cvm_ops->vm_ops.open = coda_vm_open;
		cvm_ops->vm_ops.close = coda_vm_close;
		cvm_ops->coda_file = coda_file;
		refcount_set(&cvm_ops->refcnt, 1);

		vma->vm_ops = &cvm_ops->vm_ops;
	}
	return ret;
}

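/*
 * Open asks Venus for the container file that caches this object and
 * stores it, along with the mmap count and access intent state, in the
 * per-file coda_file_info.
 */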
int coda_open(struct inode *coda_inode, struct file *coda_file)
{
	struct file *host_file = NULL;
	int error;
	unsigned short flags = coda_file->f_flags & (~O_EXCL);
	unsigned short coda_flags = coda_flags_to_cflags(flags);
	struct coda_file_info *cfi;

	cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL);
	if (!cfi)
		return -ENOMEM;

	error = venus_open(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags,
			   &host_file);
	if (!host_file)
		error = -EIO;

	if (error) {
		kfree(cfi);
		return error;
	}

	host_file->f_flags |= coda_file->f_flags & (O_APPEND | O_SYNC);

	cfi->cfi_magic = CODA_MAGIC;
	cfi->cfi_mapcount = 0;
	cfi->cfi_container = host_file;
	/* assume access intents are supported unless we hear otherwise */
	cfi->cfi_access_intent = true;

	BUG_ON(coda_file->private_data != NULL);
	coda_file->private_data = cfi;
	return 0;
}

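/*
 * Release notifies Venus of the close, undoes the address space
 * redirection once the last mmapped reference is gone, and drops the
 * container file.
 */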
int coda_release(struct inode *coda_inode, struct file *coda_file)
{
	unsigned short flags = (coda_file->f_flags) & (~O_EXCL);
	unsigned short coda_flags = coda_flags_to_cflags(flags);
	struct coda_file_info *cfi;
	struct coda_inode_info *cii;
	struct inode *host_inode;

	cfi = coda_ftoc(coda_file);

	venus_close(coda_inode->i_sb, coda_i2f(coda_inode),
			  coda_flags, coda_file->f_cred->fsuid);

	host_inode = file_inode(cfi->cfi_container);
	cii = ITOC(coda_inode);

	/* did we mmap this file? */
	spin_lock(&cii->c_lock);
	if (coda_inode->i_mapping == &host_inode->i_data) {
		cii->c_mapcount -= cfi->cfi_mapcount;
		if (!cii->c_mapcount)
			coda_inode->i_mapping = &coda_inode->i_data;
	}
	spin_unlock(&cii->c_lock);

	fput(cfi->cfi_container);
	kfree(coda_file->private_data);
	coda_file->private_data = NULL;

	/* VFS fput ignores the return value from file_operations->release, so
	 * there is no use returning an error here */
	return 0;
}

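/*
 * fsync writes back the Coda address space and syncs the container
 * file; for a full (non-datasync) fsync, Venus is asked to fsync the
 * object as well.
 */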
int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync)
{
	struct file *host_file;
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi;
	int err;

	if (!(S_ISREG(coda_inode->i_mode) || S_ISDIR(coda_inode->i_mode) ||
	      S_ISLNK(coda_inode->i_mode)))
		return -EINVAL;

	err = filemap_write_and_wait_range(coda_inode->i_mapping, start, end);
	if (err)
		return err;
	inode_lock(coda_inode);

	cfi = coda_ftoc(coda_file);
	host_file = cfi->cfi_container;

	err = vfs_fsync(host_file, datasync);
	if (!err && !datasync)
		err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode));
	inode_unlock(coda_inode);

	return err;
}

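/* file operations for regular Coda files; I/O goes via the container file */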
const struct file_operations coda_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= coda_file_read_iter,
	.write_iter	= coda_file_write_iter,
	.mmap		= coda_file_mmap,
	.open		= coda_open,
	.release	= coda_release,
	.fsync		= coda_fsync,
	.splice_read	= coda_file_splice_read,
};