// SPDX-License-Identifier: GPL-2.0-or-later
/* NFS filesystem cache interface
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/xarray.h>
#include <linux/fscache.h>
#include <linux/netfs.h>

#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "nfstrace.h"

#define NFS_MAX_KEY_LEN 1000

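/*
 * Append a hex-encoded value to the cache key, preceded by a comma.  A value
 * of zero is encoded as just the comma to keep the key short.  Returns false
 * if the key has already reached its maximum length.
 */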
static bool nfs_append_int(char *key, int *_len, unsigned long long x)
{
	if (*_len > NFS_MAX_KEY_LEN)
		return false;
	if (x == 0)
		key[(*_len)++] = ',';
	else
		*_len += sprintf(key + *_len, ",%llx", x);
	return true;
}

/*
 * Generate the part of the cache volume key that identifies an NFS client.
 * - The client details are always included in the superblock's volume key;
 *   filehandle-keyed cookies are then acquired per inode, depending on the
 *   mount flags.
 */
static bool nfs_fscache_get_client_key(struct nfs_client *clp,
				       char *key, int *_len)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
	const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;

	*_len += snprintf(key + *_len, NFS_MAX_KEY_LEN - *_len,
			  ",%u.%u,%x",
			  clp->rpc_ops->version,
			  clp->cl_minorversion,
			  clp->cl_addr.ss_family);

	switch (clp->cl_addr.ss_family) {
	case AF_INET:
		if (!nfs_append_int(key, _len, sin->sin_port) ||
		    !nfs_append_int(key, _len, sin->sin_addr.s_addr))
			return false;
		return true;

	case AF_INET6:
		if (!nfs_append_int(key, _len, sin6->sin6_port) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[0]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[1]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[2]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[3]))
			return false;
		return true;

	default:
		printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
		       clp->cl_addr.ss_family);
		return false;
	}
}

/*
 * Get the cache cookie for an NFS superblock.
 *
 * The default uniquifier is just an empty string, but it may be overridden
 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
 * superblock across an automount point of some nature.
 */
int nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
	struct fscache_volume *vcookie;
	struct nfs_server *nfss = NFS_SB(sb);
	unsigned int len = 3;
	char *key;

	if (uniq) {
		nfss->fscache_uniq = kmemdup_nul(uniq, ulen, GFP_KERNEL);
		if (!nfss->fscache_uniq)
			return -ENOMEM;
	}

	key = kmalloc(NFS_MAX_KEY_LEN + 24, GFP_KERNEL);
	if (!key)
		return -ENOMEM;

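	/*
	 * Build the volume key: "nfs" followed by comma-separated hex fields
	 * covering the NFS version, the server's address, the fsid and the
	 * mount parameters that affect coherency, with the uniquifier (if
	 * any) appended at the end.
	 */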
	memcpy(key, "nfs", 3);
	if (!nfs_fscache_get_client_key(nfss->nfs_client, key, &len) ||
	    !nfs_append_int(key, &len, nfss->fsid.major) ||
	    !nfs_append_int(key, &len, nfss->fsid.minor) ||
	    !nfs_append_int(key, &len, sb->s_flags & NFS_SB_MASK) ||
	    !nfs_append_int(key, &len, nfss->flags) ||
	    !nfs_append_int(key, &len, nfss->rsize) ||
	    !nfs_append_int(key, &len, nfss->wsize) ||
	    !nfs_append_int(key, &len, nfss->acregmin) ||
	    !nfs_append_int(key, &len, nfss->acregmax) ||
	    !nfs_append_int(key, &len, nfss->acdirmin) ||
	    !nfs_append_int(key, &len, nfss->acdirmax) ||
	    !nfs_append_int(key, &len, nfss->client->cl_auth->au_flavor))
		goto out;

	if (ulen > 0) {
		if (ulen > NFS_MAX_KEY_LEN - len)
			goto out;
		key[len++] = ',';
		memcpy(key + len, uniq, ulen);
		len += ulen;
	}
	key[len] = 0;

	/* create a cache index for looking up filehandles */
	vcookie = fscache_acquire_volume(key,
					 NULL, /* preferred_cache */
					 NULL, 0 /* coherency_data */);
	if (IS_ERR(vcookie)) {
		if (vcookie != ERR_PTR(-EBUSY)) {
			kfree(key);
			return PTR_ERR(vcookie);
		}
		pr_err("NFS: Cache volume key already in use (%s)\n", key);
		vcookie = NULL;
	}
	nfss->fscache = vcookie;

out:
	kfree(key);
	return 0;
}

/*
 * Release a per-superblock cookie.
 */
void nfs_fscache_release_super_cookie(struct super_block *sb)
{
	struct nfs_server *nfss = NFS_SB(sb);

	fscache_relinquish_volume(nfss->fscache, NULL, false);
	nfss->fscache = NULL;
	kfree(nfss->fscache_uniq);
}

/*
 * Initialise the per-inode cache cookie pointer for an NFS inode.
 */
void nfs_fscache_init_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_server *nfss = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);

	netfs_inode(inode)->cache = NULL;
	if (!(nfss->fscache && S_ISREG(inode->i_mode)))
		return;

	nfs_fscache_update_auxdata(&auxdata, inode);

	netfs_inode(inode)->cache = fscache_acquire_cookie(
					       nfss->fscache,
					       0,
					       nfsi->fh.data, /* index_key */
					       nfsi->fh.size,
					       &auxdata,      /* aux_data */
					       sizeof(auxdata),
					       i_size_read(inode));

	if (netfs_inode(inode)->cache)
		mapping_set_release_always(inode->i_mapping);
}

/*
 * Release a per-inode cookie.
 */
void nfs_fscache_clear_inode(struct inode *inode)
{
	fscache_relinquish_cookie(netfs_i_cookie(netfs_inode(inode)), false);
	netfs_inode(inode)->cache = NULL;
}

/*
 * Enable or disable caching, as appropriate, for a file that is being opened.
 * The cookie is allocated when the inode is initialised, but is not enabled
 * at that time.  Enablement is deferred to file-open time to avoid stat() and
 * access() thrashing the cache.
 *
 * For now, with NFS, only regular files that are open read-only will be able
 * to use the cache.
 *
 * We enable the cache for an inode if we open it read-only and it isn't
 * currently open for writing.  We disable the cache if the inode is open
 * write-only.
 *
 * The caller uses the file struct to pin i_writecount on the inode before
 * calling us when a file is opened for writing, so we can make use of that.
 *
 * Note that this may be invoked multiple times in parallel by parallel
 * nfs_open() functions.
 */
void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
	bool open_for_write = inode_is_open_for_write(inode);

	if (!fscache_cookie_valid(cookie))
		return;

	fscache_use_cookie(cookie, open_for_write);
	if (open_for_write) {
		nfs_fscache_update_auxdata(&auxdata, inode);
		fscache_invalidate(cookie, &auxdata, i_size_read(inode),
				   FSCACHE_INVAL_DIO_WRITE);
	}
}
EXPORT_SYMBOL_GPL(nfs_fscache_open_file);

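/*
 * Drop the use of the cookie taken at file-open time, passing back the
 * current auxiliary data and file size so that the cache can update its
 * coherency information.
 */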
void nfs_fscache_release_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
	loff_t i_size = i_size_read(inode);

	nfs_fscache_update_auxdata(&auxdata, inode);
	fscache_unuse_cookie(cookie, &auxdata, &i_size);
}

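/*
 * Issue a read of a folio through netfs.  Returns -ENOBUFS if the inode has
 * no cache cookie, so that the caller can fall back to the ordinary NFS read
 * path.
 */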
int nfs_netfs_read_folio(struct file *file, struct folio *folio)
{
	if (!netfs_inode(folio_inode(folio))->cache)
		return -ENOBUFS;

	return netfs_read_folio(file, folio);
}

int nfs_netfs_readahead(struct readahead_control *ractl)
{
	struct inode *inode = ractl->mapping->host;

	if (!netfs_inode(inode)->cache)
		return -ENOBUFS;

	netfs_readahead(ractl);
	return 0;
}

static atomic_t nfs_netfs_debug_id;
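/*
 * Set up a netfs I/O request: stash the file's open context so that reads
 * issued on its behalf carry the right credentials, and assign a unique
 * debug ID for tracing.
 */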
static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
	rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);

	return 0;
}

static void nfs_netfs_free_request(struct netfs_io_request *rreq)
{
	put_nfs_open_context(rreq->netfs_priv);
}

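/*
 * Allocate a record to track the NFS I/O issued for a netfs subrequest.  The
 * initial reference is dropped by nfs_netfs_issue_read() once all of the
 * reads have been queued.
 */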
static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
{
	struct nfs_netfs_io_data *netfs;

	netfs = kzalloc(sizeof(*netfs), GFP_KERNEL_ACCOUNT);
	if (!netfs)
		return NULL;
	netfs->sreq = sreq;
	refcount_set(&netfs->refcount, 1);
	return netfs;
}

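/*
 * Clamp the length of a subrequest to the server's rsize, the maximum amount
 * of data that a single READ request may transfer.
 */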
static bool nfs_netfs_clamp_length(struct netfs_io_subrequest *sreq)
{
	size_t	rsize = NFS_SB(sreq->rreq->inode->i_sb)->rsize;

	sreq->len = min(sreq->len, rsize);
	return true;
}

static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
{
	struct nfs_netfs_io_data	*netfs;
	struct nfs_pageio_descriptor	pgio;
	struct inode *inode = sreq->rreq->inode;
	struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
	struct page *page;
	unsigned long idx;
	int err;
	pgoff_t start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
	pgoff_t last = ((sreq->start + sreq->len -
			 sreq->transferred - 1) >> PAGE_SHIFT);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	netfs = nfs_netfs_alloc(sreq);
	if (!netfs)
		return netfs_subreq_terminated(sreq, -ENOMEM, false);

	pgio.pg_netfs = netfs; /* used in completion */

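	/*
	 * Walk the page-cache pages covering the remainder of the subrequest
	 * and queue each folio for an NFS read.  netfs is responsible for
	 * unlocking the pages on completion (see nfs_netfs_folio_unlock()).
	 */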
	xa_for_each_range(&sreq->rreq->mapping->i_pages, idx, page, start, last) {
		/* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
		err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
		if (err < 0) {
			netfs->error = err;
			goto out;
		}
	}
out:
	nfs_pageio_complete_read(&pgio);
	nfs_netfs_put(netfs);
}

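/*
 * Take an extra reference on the netfs I/O record for each NFS read that is
 * dispatched for it; the matching put is in nfs_netfs_read_completion().
 */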
void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr)
{
	struct nfs_netfs_io_data        *netfs = hdr->netfs;

	if (!netfs)
		return;

	nfs_netfs_get(netfs);
}

int nfs_netfs_folio_unlock(struct folio *folio)
{
	struct inode *inode = folio_file_mapping(folio)->host;

	/*
	 * If fscache is enabled, netfs will unlock pages.
	 */
	if (netfs_inode(inode)->cache)
		return 0;

	return 1;
}

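/*
 * On completion of an NFS read, propagate any EOF indication and error to the
 * netfs subrequest, accumulate the number of bytes transferred, and drop the
 * reference taken in nfs_netfs_initiate_read().
 */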
void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_netfs_io_data        *netfs = hdr->netfs;
	struct netfs_io_subrequest      *sreq;

	if (!netfs)
		return;

	sreq = netfs->sreq;
	if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);

	if (hdr->error)
		netfs->error = hdr->error;
	else
		atomic64_add(hdr->res.count, &netfs->transferred);

	nfs_netfs_put(netfs);
	hdr->netfs = NULL;
}

const struct netfs_request_ops nfs_netfs_ops = {
	.init_request		= nfs_netfs_init_request,
	.free_request		= nfs_netfs_free_request,
	.issue_read		= nfs_netfs_issue_read,
	.clamp_length		= nfs_netfs_clamp_length
};