// SPDX-License-Identifier: GPL-2.0-or-later
/* Unbuffered and direct write support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/uio.h>
#include "internal.h"

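/*
 * After a successful unbuffered/direct write that lands beyond the current
 * EOF, extend the inode size to cover it, letting the filesystem do the
 * update itself if it provides an ->update_i_size() op.
 */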
static void netfs_cleanup_dio_write(struct netfs_io_request *wreq)
{
	struct inode *inode = wreq->inode;
	unsigned long long end = wreq->start + wreq->len;

	if (!wreq->error &&
	    i_size_read(inode) < end) {
		if (wreq->netfs_ops->update_i_size)
			wreq->netfs_ops->update_i_size(inode, end);
		else
			i_size_write(inode, end);
	}
}

/*
 * Perform an unbuffered write where we may have to do an RMW operation on an
 * encrypted file.  This can also be used for direct I/O writes.
 */
static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
						  struct netfs_group *netfs_group)
{
	struct netfs_io_request *wreq;
	unsigned long long start = iocb->ki_pos;
	unsigned long long end = start + iov_iter_count(iter);
	ssize_t ret, n;
	bool async = !is_sync_kiocb(iocb);

	_enter("");

	/* We're going to need a bounce buffer if what we transmit is going to
	 * be different in some way to the source buffer, e.g. because it gets
	 * encrypted/compressed or because it needs expanding to a block size.
	 */
	// TODO

	_debug("uw %llx-%llx", start, end);

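	/* Allocate a write request, marking it as a DIO write if the caller
	 * asked for direct I/O and as a plain unbuffered write otherwise
	 * (e.g. the encrypted-file RMW case mentioned above).
	 */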
	wreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
				   start, end - start,
				   iocb->ki_flags & IOCB_DIRECT ?
				   NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
	if (IS_ERR(wreq))
		return PTR_ERR(wreq);

	{
		/* If this is an async op and we're not using a bounce buffer,
		 * we have to save the source buffer as the iterator is only
		 * good until we return.  In such a case, extract an iterator
		 * to represent as much of the output buffer as we can
		 * manage.  Note that the extraction might not be able to
		 * allocate a sufficiently large bvec array and may shorten the
		 * request.
		 */
		if (async || user_backed_iter(iter)) {
			n = netfs_extract_user_iter(iter, wreq->len, &wreq->iter, 0);
			if (n < 0) {
				ret = n;
				goto out;
			}
			wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec;
			wreq->direct_bv_count = n;
			wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
			wreq->len = iov_iter_count(&wreq->iter);
		} else {
			wreq->iter = *iter;
		}

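		/* With no bounce buffer in use yet, the I/O iterator is
		 * simply the (possibly shortened) source iterator.
		 */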
		wreq->io_iter = wreq->iter;
	}

	/* Copy the data into the bounce buffer and encrypt it. */
	// TODO

	/* Dispatch the write. */
	__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
	if (async)
		wreq->iocb = iocb;
	wreq->cleanup = netfs_cleanup_dio_write;
	ret = netfs_begin_write(wreq, is_sync_kiocb(iocb),
				iocb->ki_flags & IOCB_DIRECT ?
				netfs_write_trace_dio_write :
				netfs_write_trace_unbuffered_write);
	if (ret < 0) {
		_debug("begin = %zd", ret);
		goto out;
	}

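	/* For a synchronous kiocb, wait for the request to complete and
	 * advance the file position by the amount actually transferred.  For
	 * an async kiocb, the result is delivered later through
	 * ->ki_complete(), so report that the I/O has been queued.
	 */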
	if (!async) {
		trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
		wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
			    TASK_UNINTERRUPTIBLE);

		ret = wreq->error;
		_debug("waited = %zd", ret);
		if (ret == 0) {
			ret = wreq->transferred;
			iocb->ki_pos += ret;
		}
	} else {
		ret = -EIOCBQUEUED;
	}

out:
	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	return ret;
}

/**
 * netfs_unbuffered_write_iter - Unbuffered write to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * Do an unbuffered write to a file, writing the data directly to the server
 * and not lodging the data in the pagecache.
 *
 * Return:
 * * Negative error code if no data has been written at all or if
 *   vfs_fsync_range() failed for a synchronous write
 * * Number of bytes written, even for truncated writes
 */
ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	unsigned long long end;
	ssize_t ret;

	_enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));

	if (!iov_iter_count(from))
		return 0;

	trace_netfs_write_iter(iocb, from);
	netfs_stat(&netfs_n_rh_dio_write);

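	/* Lock out buffered I/O for the duration of the direct write. */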
	ret = netfs_start_io_direct(inode);
	if (ret < 0)
		return ret;
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(file);
	if (ret < 0)
		goto out;
	ret = file_update_time(file);
	if (ret < 0)
		goto out;
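	/* Flush and discard any pagecache folios that overlap the write so
	 * that buffered readers don't see stale data afterwards.
	 */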
	ret = kiocb_invalidate_pages(iocb, iov_iter_count(from));
	if (ret < 0)
		goto out;
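	/* The server will hold data at least out to the end of this write,
	 * so push out the point beyond which the file is assumed to contain
	 * no data.
	 */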
	end = iocb->ki_pos + iov_iter_count(from);
	if (end > ictx->zero_point)
		ictx->zero_point = end;

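	/* Invalidate any copy of the affected region held in the local cache
	 * as we're going to write straight to the server.
	 */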
	fscache_invalidate(netfs_i_cookie(ictx), NULL, i_size_read(inode),
			   FSCACHE_INVAL_DIO_WRITE);
	ret = netfs_unbuffered_write_iter_locked(iocb, from, NULL);
out:
	netfs_end_io_direct(inode);
	return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_write_iter);
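
/*
 * Illustrative sketch (not part of this file): a network filesystem would
 * typically reach netfs_unbuffered_write_iter() from its ->write_iter()
 * handler when the caller requested direct I/O, e.g.:
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		if (iocb->ki_flags & IOCB_DIRECT)
 *			return netfs_unbuffered_write_iter(iocb, from);
 *		return generic_file_write_iter(iocb, from);
 *	}
 *
 * "myfs_file_write_iter" is a placeholder and the buffered fallback shown is
 * only one possibility.
 */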