// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common NFS I/O operations for the pnfs file based
 * layout drivers.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tom Haynes <loghyr@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/addr.h>
#include <linux/module.h>

#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

void pnfs_generic_rw_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	nfs_put_client(hdr->ds_clp);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);

/* Fake up some data that will cause nfs_commit_release to retry the writes. */
void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
{
	struct nfs_writeverf *verf = data->res.verf;

	data->task.tk_status = 0;
	memset(&verf->verifier, 0, sizeof(verf->verifier));
	verf->committed = NFS_UNSTABLE;
}
EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);

void pnfs_generic_write_commit_done(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	/* Note this may cause RPC to be resent */
	wdata->mds_ops->rpc_call_done(task, data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_write_commit_done);

void pnfs_generic_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	pnfs_put_lseg(data->lseg);
	nfs_put_client(data->ds_clp);
	nfs_commitdata_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);

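/*
 * If both the written and committing lists of @bucket are empty, detach
 * and return the bucket's lseg so that the caller can drop the reference
 * the bucket was holding.  Returns NULL while the bucket is still in use.
 */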
static struct pnfs_layout_segment *
pnfs_free_bucket_lseg(struct pnfs_commit_bucket *bucket)
{
	if (list_empty(&bucket->committing) && list_empty(&bucket->written)) {
		struct pnfs_layout_segment *freeme = bucket->lseg;
		bucket->lseg = NULL;
		return freeme;
	}
	return NULL;
}

/* The generic layer is about to remove the req from the commit list.
 * If this will make the bucket empty, it will need to put the lseg reference.
 * Note this must be called holding nfsi->commit_mutex
 */
void
pnfs_generic_clear_request_commit(struct nfs_page *req,
				  struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_bucket *bucket = NULL;

	if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
		goto out;
	cinfo->ds->nwritten--;
	if (list_is_singular(&req->wb_list))
		bucket = list_first_entry(&req->wb_list,
					  struct pnfs_commit_bucket, written);
out:
	nfs_request_remove_commit_list(req, cinfo);
	if (bucket)
		pnfs_put_lseg(pnfs_free_bucket_lseg(bucket));
}
EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);

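/*
 * Allocate a pnfs_commit_array with @n buckets.  Each bucket starts out
 * with empty written/committing lists, no layout segment, and an invalid
 * direct I/O write verifier.
 */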
struct pnfs_commit_array *
pnfs_alloc_commit_array(size_t n, gfp_t gfp_flags)
{
	struct pnfs_commit_array *p;
	struct pnfs_commit_bucket *b;

	p = kmalloc(struct_size(p, buckets, n), gfp_flags);
	if (!p)
		return NULL;
	p->nbuckets = n;
	INIT_LIST_HEAD(&p->cinfo_list);
	INIT_LIST_HEAD(&p->lseg_list);
	p->lseg = NULL;
	for (b = &p->buckets[0]; n != 0; b++, n--) {
		INIT_LIST_HEAD(&b->written);
		INIT_LIST_HEAD(&b->committing);
		b->lseg = NULL;
		b->direct_verf.committed = NFS_INVALID_STABLE_HOW;
	}
	return p;
}
EXPORT_SYMBOL_GPL(pnfs_alloc_commit_array);

void
pnfs_free_commit_array(struct pnfs_commit_array *p)
{
	kfree_rcu(p, rcu);
}
EXPORT_SYMBOL_GPL(pnfs_free_commit_array);

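/*
 * Look up the commit array associated with @lseg.  This is an RCU walk
 * of fl_cinfo->commits, so the caller must be in an RCU read-side
 * critical section or otherwise keep the list stable.
 */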
static struct pnfs_commit_array *
pnfs_find_commit_array_by_lseg(struct pnfs_ds_commit_info *fl_cinfo,
		struct pnfs_layout_segment *lseg)
{
	struct pnfs_commit_array *array;

	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
		if (array->lseg == lseg)
			return array;
	}
	return NULL;
}

struct pnfs_commit_array *
pnfs_add_commit_array(struct pnfs_ds_commit_info *fl_cinfo,
		struct pnfs_commit_array *new,
		struct pnfs_layout_segment *lseg)
{
	struct pnfs_commit_array *array;

	array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
	if (array)
		return array;
	new->lseg = lseg;
	refcount_set(&new->refcount, 1);
	list_add_rcu(&new->cinfo_list, &fl_cinfo->commits);
	list_add(&new->lseg_list, &lseg->pls_commits);
	return new;
}
EXPORT_SYMBOL_GPL(pnfs_add_commit_array);

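/*
 * Find the commit array for @lseg, asking the layout driver to set one
 * up via ->setup_ds_info() if it does not exist yet.  May still return
 * NULL if that setup failed (e.g. an allocation failure).
 */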
static struct pnfs_commit_array *
pnfs_lookup_commit_array(struct pnfs_ds_commit_info *fl_cinfo,
		struct pnfs_layout_segment *lseg)
{
	struct pnfs_commit_array *array;

	rcu_read_lock();
	array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
	if (!array) {
		rcu_read_unlock();
		fl_cinfo->ops->setup_ds_info(fl_cinfo, lseg);
		rcu_read_lock();
		array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
	}
	rcu_read_unlock();
	return array;
}

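/*
 * Commit array reference counting.  Dropping the last reference unlinks
 * the array from the fl_cinfo->commits and lseg->pls_commits lists and
 * frees it after an RCU grace period.  pnfs_put_commit_array() takes the
 * inode->i_lock for the final put; the *_locked variants rely on the
 * caller for serialisation.
 */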
static void
pnfs_release_commit_array_locked(struct pnfs_commit_array *array)
{
	list_del_rcu(&array->cinfo_list);
	list_del(&array->lseg_list);
	pnfs_free_commit_array(array);
}

static void
pnfs_put_commit_array_locked(struct pnfs_commit_array *array)
{
	if (refcount_dec_and_test(&array->refcount))
		pnfs_release_commit_array_locked(array);
}

static void
pnfs_put_commit_array(struct pnfs_commit_array *array, struct inode *inode)
{
	if (refcount_dec_and_lock(&array->refcount, &inode->i_lock)) {
		pnfs_release_commit_array_locked(array);
		spin_unlock(&inode->i_lock);
	}
}

static struct pnfs_commit_array *
pnfs_get_commit_array(struct pnfs_commit_array *array)
{
	if (refcount_inc_not_zero(&array->refcount))
		return array;
	return NULL;
}

static void
pnfs_remove_and_free_commit_array(struct pnfs_commit_array *array)
{
	array->lseg = NULL;
	list_del_init(&array->lseg_list);
	pnfs_put_commit_array_locked(array);
}

void
pnfs_generic_ds_cinfo_release_lseg(struct pnfs_ds_commit_info *fl_cinfo,
		struct pnfs_layout_segment *lseg)
{
	struct pnfs_commit_array *array, *tmp;

	list_for_each_entry_safe(array, tmp, &lseg->pls_commits, lseg_list)
		pnfs_remove_and_free_commit_array(array);
}
EXPORT_SYMBOL_GPL(pnfs_generic_ds_cinfo_release_lseg);

void
pnfs_generic_ds_cinfo_destroy(struct pnfs_ds_commit_info *fl_cinfo)
{
	struct pnfs_commit_array *array, *tmp;

	list_for_each_entry_safe(array, tmp, &fl_cinfo->commits, cinfo_list)
		pnfs_remove_and_free_commit_array(array);
}
EXPORT_SYMBOL_GPL(pnfs_generic_ds_cinfo_destroy);

/*
 * Locks the nfs_page requests for commit and moves them to
 * @bucket->committing.
 */
static int
pnfs_bucket_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
				struct nfs_commit_info *cinfo,
				int max)
{
	struct list_head *src = &bucket->written;
	struct list_head *dst = &bucket->committing;
	int ret;

	lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
	ret = nfs_scan_commit_list(src, dst, cinfo, max);
	if (ret) {
		cinfo->ds->nwritten -= ret;
		cinfo->ds->ncommitting += ret;
	}
	return ret;
}

static int pnfs_bucket_scan_array(struct nfs_commit_info *cinfo,
				  struct pnfs_commit_bucket *buckets,
				  unsigned int nbuckets,
				  int max)
{
	unsigned int i;
	int rv = 0, cnt;

	for (i = 0; i < nbuckets && max != 0; i++) {
		cnt = pnfs_bucket_scan_ds_commit_list(&buckets[i], cinfo, max);
		rv += cnt;
		max -= cnt;
	}
	return rv;
}

/* Move reqs from written to committing lists, returning count
 * of number moved.
 */
int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max)
{
	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
	struct pnfs_commit_array *array;
	int rv = 0, cnt;

	rcu_read_lock();
	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
		if (!array->lseg || !pnfs_get_commit_array(array))
			continue;
		rcu_read_unlock();
		cnt = pnfs_bucket_scan_array(cinfo, array->buckets,
				array->nbuckets, max);
		rcu_read_lock();
		pnfs_put_commit_array(array, cinfo->inode);
		rv += cnt;
		max -= cnt;
		if (!max)
			break;
	}
	rcu_read_unlock();
	return rv;
}
EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists);

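/*
 * Move every request on the buckets' written lists over to @dst.  If
 * emptying a bucket drops the last user of its layout segment, release
 * that reference and restart the scan from the first bucket.
 */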
static unsigned int
pnfs_bucket_recover_commit_reqs(struct list_head *dst,
				struct pnfs_commit_bucket *buckets,
				unsigned int nbuckets,
				struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_bucket *b;
	struct pnfs_layout_segment *freeme;
	unsigned int nwritten, ret = 0;
	unsigned int i;

restart:
	for (i = 0, b = buckets; i < nbuckets; i++, b++) {
		nwritten = nfs_scan_commit_list(&b->written, dst, cinfo, 0);
		if (!nwritten)
			continue;
		ret += nwritten;
		freeme = pnfs_free_bucket_lseg(b);
		if (freeme) {
			pnfs_put_lseg(freeme);
			goto restart;
		}
	}
	return ret;
}

/* Pull everything off the committing lists and dump into @dst.  */
void pnfs_generic_recover_commit_reqs(struct list_head *dst,
				      struct nfs_commit_info *cinfo)
{
	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
	struct pnfs_commit_array *array;
	unsigned int nwritten;

	lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
		if (!array->lseg || !pnfs_get_commit_array(array))
			continue;
		rcu_read_unlock();
		nwritten = pnfs_bucket_recover_commit_reqs(dst,
							   array->buckets,
							   array->nbuckets,
							   cinfo);
		rcu_read_lock();
		pnfs_put_commit_array(array, cinfo->inode);
		fl_cinfo->nwritten -= nwritten;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);

static struct nfs_page *
pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets,
			       unsigned int nbuckets, struct folio *folio)
{
	struct nfs_page *req;
	struct pnfs_commit_bucket *b;
	unsigned int i;

	/* Linearly search the commit lists for each bucket until a matching
	 * request is found */
	for (i = 0, b = buckets; i < nbuckets; i++, b++) {
		list_for_each_entry(req, &b->written, wb_list) {
			if (nfs_page_to_folio(req) == folio)
				return req->wb_head;
		}
		list_for_each_entry(req, &b->committing, wb_list) {
			if (nfs_page_to_folio(req) == folio)
				return req->wb_head;
		}
	}
	return NULL;
}

/* pnfs_generic_search_commit_reqs - Search lists in @cinfo for the head request
 *				   for @folio
 * @cinfo - commit info for current inode
 * @folio - page to search for matching head request
 *
 * Return: the head request if one is found, otherwise %NULL.
 */
struct nfs_page *pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo,
						 struct folio *folio)
{
	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
	struct pnfs_commit_array *array;
	struct nfs_page *req;

	list_for_each_entry(array, &fl_cinfo->commits, cinfo_list) {
		req = pnfs_bucket_search_commit_reqs(array->buckets,
						     array->nbuckets, folio);
		if (req)
			return req;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(pnfs_generic_search_commit_reqs);

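/*
 * Splice the bucket's committing list onto @head and hand back a layout
 * segment reference for the commit: the bucket's own reference if the
 * bucket is now empty, otherwise a newly acquired one.
 */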
static struct pnfs_layout_segment *
pnfs_bucket_get_committing(struct list_head *head,
			   struct pnfs_commit_bucket *bucket,
			   struct nfs_commit_info *cinfo)
{
	struct pnfs_layout_segment *lseg;
	struct list_head *pos;

	list_for_each(pos, &bucket->committing)
		cinfo->ds->ncommitting--;
	list_splice_init(&bucket->committing, head);
	lseg = pnfs_free_bucket_lseg(bucket);
	if (!lseg)
		lseg = pnfs_get_lseg(bucket->lseg);
	return lseg;
}

static struct nfs_commit_data *
pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket,
			     struct nfs_commit_info *cinfo)
{
	struct nfs_commit_data *data = nfs_commitdata_alloc();

	if (!data)
		return NULL;
	data->lseg = pnfs_bucket_get_committing(&data->pages, bucket, cinfo);
	return data;
}

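/*
 * Put the requests sitting on the buckets' committing lists back on the
 * commit lists so that they will be retried, dropping the layout segment
 * reference obtained for each bucket.  Used to clean up when setting up
 * the per-bucket commits fails part way through.
 */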
static void pnfs_generic_retry_commit(struct pnfs_commit_bucket *buckets,
				      unsigned int nbuckets,
				      struct nfs_commit_info *cinfo,
				      unsigned int idx)
{
	struct pnfs_commit_bucket *bucket;
	struct pnfs_layout_segment *freeme;
	LIST_HEAD(pages);

	for (bucket = buckets; idx < nbuckets; bucket++, idx++) {
		if (list_empty(&bucket->committing))
			continue;
		mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
		freeme = pnfs_bucket_get_committing(&pages, bucket, cinfo);
		mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
		nfs_retry_commit(&pages, freeme, cinfo, idx);
		pnfs_put_lseg(freeme);
	}
}

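/*
 * Allocate one nfs_commit_data per non-empty bucket and queue it on
 * @list.  On allocation failure the remaining buckets are handed to
 * pnfs_generic_retry_commit(), and the number of commits set up so far
 * is returned.
 */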
static unsigned int
pnfs_bucket_alloc_ds_commits(struct list_head *list,
			     struct pnfs_commit_bucket *buckets,
			     unsigned int nbuckets,
			     struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_bucket *bucket;
	struct nfs_commit_data *data;
	unsigned int i;
	unsigned int nreq = 0;

	for (i = 0, bucket = buckets; i < nbuckets; i++, bucket++) {
		if (list_empty(&bucket->committing))
			continue;
		mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
		if (!list_empty(&bucket->committing)) {
			data = pnfs_bucket_fetch_commitdata(bucket, cinfo);
			if (!data)
				goto out_error;
			data->ds_commit_index = i;
			list_add_tail(&data->list, list);
			nreq++;
		}
		mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	}
	return nreq;
out_error:
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	/* Clean up on error */
	pnfs_generic_retry_commit(buckets, nbuckets, cinfo, i);
	return nreq;
}

static unsigned int
pnfs_alloc_ds_commits_list(struct list_head *list,
			   struct pnfs_ds_commit_info *fl_cinfo,
			   struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_array *array;
	unsigned int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
		if (!array->lseg || !pnfs_get_commit_array(array))
			continue;
		rcu_read_unlock();
		ret += pnfs_bucket_alloc_ds_commits(list, array->buckets,
				array->nbuckets, cinfo);
		rcu_read_lock();
		pnfs_put_commit_array(array, cinfo->inode);
	}
	rcu_read_unlock();
	return ret;
}

/* This follows nfs_commit_list pretty closely */
int
pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			     int how, struct nfs_commit_info *cinfo,
			     int (*initiate_commit)(struct nfs_commit_data *data,
						    int how))
{
	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
	struct nfs_commit_data *data, *tmp;
	LIST_HEAD(list);
	unsigned int nreq = 0;

	if (!list_empty(mds_pages)) {
		data = nfs_commitdata_alloc();
		if (!data) {
			nfs_retry_commit(mds_pages, NULL, cinfo, -1);
			return -ENOMEM;
		}
		data->ds_commit_index = -1;
		list_splice_init(mds_pages, &data->pages);
		list_add_tail(&data->list, &list);
		nreq++;
	}

	nreq += pnfs_alloc_ds_commits_list(&list, fl_cinfo, cinfo);
	if (nreq == 0)
		goto out;

	list_for_each_entry_safe(data, tmp, &list, list) {
		list_del(&data->list);
		if (data->ds_commit_index < 0) {
			nfs_init_commit(data, NULL, NULL, cinfo);
			nfs_initiate_commit(NFS_CLIENT(inode), data,
					    NFS_PROTO(data->inode),
					    data->mds_ops, how,
					    RPC_TASK_CRED_NOREF);
		} else {
			nfs_init_commit(data, NULL, data->lseg, cinfo);
			initiate_commit(data, how);
		}
	}
out:
	return PNFS_ATTEMPTED;
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist);

/*
 * Data server cache
 *
 * Data servers can be mapped to different device ids.
 * nfs4_pnfs_ds reference counting
 *   - set to 1 on allocation
 *   - incremented when a device id maps a data server already in the cache.
 *   - decremented when deviceid is removed from the cache.
 */
static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
static LIST_HEAD(nfs4_data_server_cache);

/* Debug routines */
static void
print_ds(struct nfs4_pnfs_ds *ds)
{
	if (ds == NULL) {
		printk(KERN_WARNING "%s NULL device\n", __func__);
		return;
	}
	printk(KERN_WARNING "        ds %s\n"
		"        ref count %d\n"
		"        client %p\n"
		"        cl_exchange_flags %x\n",
		ds->ds_remotestr,
		refcount_read(&ds->ds_count), ds->ds_clp,
		ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
}

static bool
same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
{
	struct sockaddr_in *a, *b;
	struct sockaddr_in6 *a6, *b6;

	if (addr1->sa_family != addr2->sa_family)
		return false;

	switch (addr1->sa_family) {
	case AF_INET:
		a = (struct sockaddr_in *)addr1;
		b = (struct sockaddr_in *)addr2;

		if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
		    a->sin_port == b->sin_port)
			return true;
		break;

	case AF_INET6:
		a6 = (struct sockaddr_in6 *)addr1;
		b6 = (struct sockaddr_in6 *)addr2;

		/* LINKLOCAL addresses must have matching scope_id */
		if (ipv6_addr_src_scope(&a6->sin6_addr) ==
		    IPV6_ADDR_SCOPE_LINKLOCAL &&
		    a6->sin6_scope_id != b6->sin6_scope_id)
			return false;

		if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
		    a6->sin6_port == b6->sin6_port)
			return true;
		break;

	default:
		dprintk("%s: unhandled address family: %u\n",
			__func__, addr1->sa_family);
		return false;
	}

	return false;
}

/*
 * Checks if 'dsaddrs1' contains a subset of 'dsaddrs2'. If it does,
 * declare a match.
 */
static bool
_same_data_server_addrs_locked(const struct list_head *dsaddrs1,
			       const struct list_head *dsaddrs2)
{
	struct nfs4_pnfs_ds_addr *da1, *da2;
	struct sockaddr *sa1, *sa2;
	bool match = false;

	list_for_each_entry(da1, dsaddrs1, da_node) {
		sa1 = (struct sockaddr *)&da1->da_addr;
		match = false;
		list_for_each_entry(da2, dsaddrs2, da_node) {
			sa2 = (struct sockaddr *)&da2->da_addr;
			match = same_sockaddr(sa1, sa2);
			if (match)
				break;
		}
		if (!match)
			break;
	}
	return match;
}

/*
 * Lookup DS by addresses.  nfs4_ds_cache_lock is held
 */
static struct nfs4_pnfs_ds *
_data_server_lookup_locked(const struct list_head *dsaddrs)
{
	struct nfs4_pnfs_ds *ds;

	list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
		if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
			return ds;
	return NULL;
}

static struct nfs4_pnfs_ds_addr *nfs4_pnfs_ds_addr_alloc(gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da = kzalloc(sizeof(*da), gfp_flags);
	if (da)
		INIT_LIST_HEAD(&da->da_node);
	return da;
}

static void nfs4_pnfs_ds_addr_free(struct nfs4_pnfs_ds_addr *da)
{
	kfree(da->da_remotestr);
	kfree(da->da_netid);
	kfree(da);
}

static void destroy_ds(struct nfs4_pnfs_ds *ds)
{
	struct nfs4_pnfs_ds_addr *da;

	dprintk("--> %s\n", __func__);
	ifdebug(FACILITY)
		print_ds(ds);

	nfs_put_client(ds->ds_clp);

	while (!list_empty(&ds->ds_addrs)) {
		da = list_first_entry(&ds->ds_addrs,
				      struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		nfs4_pnfs_ds_addr_free(da);
	}

	kfree(ds->ds_remotestr);
	kfree(ds);
}

void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
{
	if (refcount_dec_and_lock(&ds->ds_count,
				&nfs4_ds_cache_lock)) {
		list_del_init(&ds->ds_node);
		spin_unlock(&nfs4_ds_cache_lock);
		destroy_ds(ds);
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_put);

/*
 * Create a string with a human readable address and port to avoid
 * complicated setup around many dprintks.
 */
static char *
nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da;
	char *remotestr;
	size_t len;
	char *p;

	len = 3;        /* '{', '}' and eol */
	list_for_each_entry(da, dsaddrs, da_node) {
		len += strlen(da->da_remotestr) + 1;    /* string plus comma */
	}

	remotestr = kzalloc(len, gfp_flags);
	if (!remotestr)
		return NULL;

	p = remotestr;
	*(p++) = '{';
	len--;
	list_for_each_entry(da, dsaddrs, da_node) {
		size_t ll = strlen(da->da_remotestr);

		if (ll > len)
			goto out_err;

		memcpy(p, da->da_remotestr, ll);
		p += ll;
		len -= ll;

		if (len < 1)
			goto out_err;
		(*p++) = ',';
		len--;
	}
	if (len < 2)
		goto out_err;
	*(p++) = '}';
	*p = '\0';
	return remotestr;
out_err:
	kfree(remotestr);
	return NULL;
}

/*
 * Given a list of multipath struct nfs4_pnfs_ds_addr, add it to ds cache if
 * uncached and return cached struct nfs4_pnfs_ds.
 */
struct nfs4_pnfs_ds *
nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
	char *remotestr;

	if (list_empty(dsaddrs)) {
		dprintk("%s: no addresses defined\n", __func__);
		goto out;
	}

	ds = kzalloc(sizeof(*ds), gfp_flags);
	if (!ds)
		goto out;

	/* this is only used for debugging, so it's ok if it's NULL */
	remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);

	spin_lock(&nfs4_ds_cache_lock);
	tmp_ds = _data_server_lookup_locked(dsaddrs);
	if (tmp_ds == NULL) {
		INIT_LIST_HEAD(&ds->ds_addrs);
		list_splice_init(dsaddrs, &ds->ds_addrs);
		ds->ds_remotestr = remotestr;
		refcount_set(&ds->ds_count, 1);
		INIT_LIST_HEAD(&ds->ds_node);
		ds->ds_clp = NULL;
		list_add(&ds->ds_node, &nfs4_data_server_cache);
		dprintk("%s add new data server %s\n", __func__,
			ds->ds_remotestr);
	} else {
		kfree(remotestr);
		kfree(ds);
		refcount_inc(&tmp_ds->ds_count);
		dprintk("%s data server %s found, inc'ed ds_count to %d\n",
			__func__, tmp_ds->ds_remotestr,
			refcount_read(&tmp_ds->ds_count));
		ds = tmp_ds;
	}
	spin_unlock(&nfs4_ds_cache_lock);
out:
	return ds;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add);

static int nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
{
	might_sleep();
	return wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING, TASK_KILLABLE);
}

static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
{
	smp_mb__before_atomic();
	clear_and_wake_up_bit(NFS4DS_CONNECTING, &ds->ds_state);
}

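/*
 * NFSv3 data server connections are set up through nfs3_set_ds_client(),
 * which lives in the NFSv3 module.  Resolve it on demand with
 * symbol_request() and drop the reference again in
 * nfs4_pnfs_v3_ds_connect_unload().
 */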
static struct nfs_client *(*get_v3_ds_connect)(
			struct nfs_server *mds_srv,
			const struct sockaddr_storage *ds_addr,
			int ds_addrlen,
			int ds_proto,
			unsigned int ds_timeo,
			unsigned int ds_retrans);

static bool load_v3_ds_connect(void)
{
	if (!get_v3_ds_connect) {
		get_v3_ds_connect = symbol_request(nfs3_set_ds_client);
		WARN_ON_ONCE(!get_v3_ds_connect);
	}

	return(get_v3_ds_connect != NULL);
}

void nfs4_pnfs_v3_ds_connect_unload(void)
{
	if (get_v3_ds_connect) {
		symbol_put(nfs3_set_ds_client);
		get_v3_ds_connect = NULL;
	}
}

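/*
 * Connect to an NFSv3 data server.  The first address that yields a
 * client becomes the DS nfs_client; any further addresses with a
 * matching transport and address family are added to that client as
 * extra transports (aliases) for multipathing.
 */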
static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
				 struct nfs4_pnfs_ds *ds,
				 unsigned int timeo,
				 unsigned int retrans)
{
	struct nfs_client *clp = ERR_PTR(-EIO);
	struct nfs4_pnfs_ds_addr *da;
	unsigned long connect_timeout = timeo * (retrans + 1) * HZ / 10;
	int status = 0;

	dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);

	if (!load_v3_ds_connect())
		return -EPROTONOSUPPORT;

	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		dprintk("%s: DS %s: trying address %s\n",
			__func__, ds->ds_remotestr, da->da_remotestr);

		if (!IS_ERR(clp)) {
			struct xprt_create xprt_args = {
				.ident = da->da_transport,
				.net = clp->cl_net,
				.dstaddr = (struct sockaddr *)&da->da_addr,
				.addrlen = da->da_addrlen,
				.servername = clp->cl_hostname,
				.connect_timeout = connect_timeout,
				.reconnect_timeout = connect_timeout,
			};

			if (da->da_transport != clp->cl_proto)
				continue;
			if (da->da_addr.ss_family != clp->cl_addr.ss_family)
				continue;
			/* Add this address as an alias */
			rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
					rpc_clnt_test_and_add_xprt, NULL);
			continue;
		}
		clp = get_v3_ds_connect(mds_srv,
				&da->da_addr,
				da->da_addrlen, da->da_transport,
				timeo, retrans);
		if (IS_ERR(clp))
			continue;
		clp->cl_rpcclient->cl_softerr = 0;
		clp->cl_rpcclient->cl_softrtry = 0;
	}

	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	smp_wmb();
	WRITE_ONCE(ds->ds_clp, clp);
	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
	return status;
}

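/*
 * Connect to an NFSv4.x data server.  As for v3, the first usable
 * address creates the DS nfs_client and initialises its session; later
 * addresses are probed for session trunking and, if they pass, added as
 * additional transports.  When the MDS connection uses TLS over TCP, the
 * DS transports are upgraded to TCP+TLS as well, with the DS address
 * used as the TLS servername for trunked connections.
 */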
static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
				 struct nfs4_pnfs_ds *ds,
				 unsigned int timeo,
				 unsigned int retrans,
				 u32 minor_version)
{
	struct nfs_client *clp = ERR_PTR(-EIO);
	struct nfs4_pnfs_ds_addr *da;
	int status = 0;

	dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);

	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		char servername[48];

		dprintk("%s: DS %s: trying address %s\n",
			__func__, ds->ds_remotestr, da->da_remotestr);

		if (!IS_ERR(clp) && clp->cl_mvops->session_trunk) {
			struct xprt_create xprt_args = {
				.ident = da->da_transport,
				.net = clp->cl_net,
				.dstaddr = (struct sockaddr *)&da->da_addr,
				.addrlen = da->da_addrlen,
				.servername = clp->cl_hostname,
				.xprtsec = clp->cl_xprtsec,
			};
			struct nfs4_add_xprt_data xprtdata = {
				.clp = clp,
			};
			struct rpc_add_xprt_test rpcdata = {
				.add_xprt_test = clp->cl_mvops->session_trunk,
				.data = &xprtdata,
			};

			if (da->da_transport != clp->cl_proto &&
					clp->cl_proto != XPRT_TRANSPORT_TCP_TLS)
				continue;
			if (da->da_transport == XPRT_TRANSPORT_TCP &&
				mds_srv->nfs_client->cl_proto ==
					XPRT_TRANSPORT_TCP_TLS) {
				struct sockaddr *addr =
					(struct sockaddr *)&da->da_addr;
				struct sockaddr_in *sin =
					(struct sockaddr_in *)&da->da_addr;
				struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)&da->da_addr;

				/* for NFS with TLS we need to supply a correct
				 * servername of the trunked transport, not the
				 * servername of the main transport stored in
				 * clp->cl_hostname. And set the protocol to
				 * indicate to use TLS
				 */
				servername[0] = '\0';
				switch(addr->sa_family) {
				case AF_INET:
					snprintf(servername, sizeof(servername),
						"%pI4", &sin->sin_addr.s_addr);
					break;
				case AF_INET6:
					snprintf(servername, sizeof(servername),
						"%pI6", &sin6->sin6_addr);
					break;
				default:
					/* do not consider this address */
					continue;
				}
				xprt_args.ident = XPRT_TRANSPORT_TCP_TLS;
				xprt_args.servername = servername;
			}
			if (da->da_addr.ss_family != clp->cl_addr.ss_family)
				continue;

			/*
			 * Test this address for session trunking and
			 * add as an alias
			 */
			xprtdata.cred = nfs4_get_clid_cred(clp);
			rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
					  rpc_clnt_setup_test_and_add_xprt,
					  &rpcdata);
			if (xprtdata.cred)
				put_cred(xprtdata.cred);
		} else {
			if (da->da_transport == XPRT_TRANSPORT_TCP &&
				mds_srv->nfs_client->cl_proto ==
					XPRT_TRANSPORT_TCP_TLS)
				da->da_transport = XPRT_TRANSPORT_TCP_TLS;
			clp = nfs4_set_ds_client(mds_srv,
						&da->da_addr,
						da->da_addrlen,
						da->da_transport, timeo,
						retrans, minor_version);
			if (IS_ERR(clp))
				continue;

			status = nfs4_init_ds_session(clp,
					mds_srv->nfs_client->cl_lease_time);
			if (status) {
				nfs_put_client(clp);
				clp = ERR_PTR(-EIO);
				continue;
			}

		}
	}

	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	smp_wmb();
	WRITE_ONCE(ds->ds_clp, clp);
	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
	return status;
}

/*
 * Create an rpc connection to the nfs4_pnfs_ds data server.
 * Currently only supports IPv4 and IPv6 addresses.
 * If connection fails, make devid unavailable and return a -errno.
 */
int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
			  struct nfs4_deviceid_node *devid, unsigned int timeo,
			  unsigned int retrans, u32 version, u32 minor_version)
{
	int err;

	do {
		err = nfs4_wait_ds_connect(ds);
		if (err || ds->ds_clp)
			goto out;
		if (nfs4_test_deviceid_unavailable(devid))
			return -ENODEV;
	} while (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) != 0);

	if (ds->ds_clp)
		goto connect_done;

	switch (version) {
	case 3:
		err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo, retrans);
		break;
	case 4:
		err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo, retrans,
					       minor_version);
		break;
	default:
		dprintk("%s: unsupported DS version %d\n", __func__, version);
		err = -EPROTONOSUPPORT;
	}

connect_done:
	nfs4_clear_ds_conn_bit(ds);
out:
	/*
	 * At this point the ds->ds_clp should be ready, but it might have
	 * hit an error.
	 */
	if (!err) {
		if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) {
			WARN_ON_ONCE(ds->ds_clp ||
				!nfs4_test_deviceid_unavailable(devid));
			return -EINVAL;
		}
		err = nfs_client_init_status(ds->ds_clp);
	}

	return err;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);

/*
 * Currently only supports ipv4, ipv6 and one multi-path address.
 */
struct nfs4_pnfs_ds_addr *
nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da = NULL;
	char *buf, *portstr;
	__be16 port;
	ssize_t nlen, rlen;
	int tmp[2];
	char *netid;
	size_t len;
	char *startsep = "";
	char *endsep = "";

	/* r_netid */
	nlen = xdr_stream_decode_string_dup(xdr, &netid, XDR_MAX_NETOBJ,
					    gfp_flags);
	if (unlikely(nlen < 0))
		goto out_err;

	/* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
	/* port is ".ABC.DEF", 8 chars max */
	rlen = xdr_stream_decode_string_dup(xdr, &buf, INET6_ADDRSTRLEN +
					    IPV6_SCOPE_ID_LEN + 8, gfp_flags);
	if (unlikely(rlen < 0))
		goto out_free_netid;

	/* replace port '.' with '-' */
	portstr = strrchr(buf, '.');
	if (!portstr) {
		dprintk("%s: Failed finding expected dot in port\n",
			__func__);
		goto out_free_buf;
	}
	*portstr = '-';

	/* find '.' between address and port */
	portstr = strrchr(buf, '.');
	if (!portstr) {
		dprintk("%s: Failed finding expected dot between address and "
			"port\n", __func__);
		goto out_free_buf;
	}
	*portstr = '\0';

	da = nfs4_pnfs_ds_addr_alloc(gfp_flags);
	if (unlikely(!da))
		goto out_free_buf;

	if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr,
		      sizeof(da->da_addr))) {
		dprintk("%s: error parsing address %s\n", __func__, buf);
		goto out_free_da;
	}

	portstr++;
	sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
	port = htons((tmp[0] << 8) | (tmp[1]));

	switch (da->da_addr.ss_family) {
	case AF_INET:
		((struct sockaddr_in *)&da->da_addr)->sin_port = port;
		da->da_addrlen = sizeof(struct sockaddr_in);
		break;

	case AF_INET6:
		((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
		da->da_addrlen = sizeof(struct sockaddr_in6);
		startsep = "[";
		endsep = "]";
		break;

	default:
		dprintk("%s: unsupported address family: %u\n",
			__func__, da->da_addr.ss_family);
		goto out_free_da;
	}

	da->da_transport = xprt_find_transport_ident(netid);
	if (da->da_transport < 0) {
		dprintk("%s: ERROR: unknown r_netid \"%s\"\n",
			__func__, netid);
		goto out_free_da;
	}

	da->da_netid = netid;

	/* save human readable address */
	len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
	da->da_remotestr = kzalloc(len, gfp_flags);

	/* NULL is ok, only used for dprintk */
	if (da->da_remotestr)
		snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
			 buf, endsep, ntohs(port));

	dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
	kfree(buf);
	return da;

out_free_da:
	kfree(da);
out_free_buf:
	dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
	kfree(buf);
out_free_netid:
	kfree(netid);
out_err:
	return NULL;
}
EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);

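/**
 * pnfs_layout_mark_request_commit - mark a request for commit to a DS
 * @req: write request to be committed
 * @lseg: layout segment the request was written under
 * @cinfo: commit info for the inode
 * @ds_commit_idx: commit bucket index chosen by the layout driver
 *
 * Adds @req to the written list of the selected commit bucket, taking a
 * layout segment reference for the bucket if it does not already hold
 * one.  If the commit array lookup fails or @lseg is no longer valid,
 * the request is handed to ->resched_write() to be retried instead.
 */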
void
pnfs_layout_mark_request_commit(struct nfs_page *req,
				struct pnfs_layout_segment *lseg,
				struct nfs_commit_info *cinfo,
				u32 ds_commit_idx)
{
	struct list_head *list;
	struct pnfs_commit_array *array;
	struct pnfs_commit_bucket *bucket;

	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	array = pnfs_lookup_commit_array(cinfo->ds, lseg);
	if (!array || !pnfs_is_valid_lseg(lseg))
		goto out_resched;
	bucket = &array->buckets[ds_commit_idx];
	list = &bucket->written;
	/* Non-empty buckets hold a reference on the lseg.  That ref
	 * is normally transferred to the COMMIT call and released
	 * there.  It could also be released if the last req is pulled
	 * off due to a rewrite, in which case it will be done in
	 * pnfs_generic_clear_request_commit
	 */
	if (!bucket->lseg)
		bucket->lseg = pnfs_get_lseg(lseg);
	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
	cinfo->ds->nwritten++;

	nfs_request_add_commit_list_locked(req, list, cinfo);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_folio_mark_unstable(nfs_page_to_folio(req), cinfo);
	return;
out_resched:
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	cinfo->completion_ops->resched_write(cinfo, req);
}
EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);

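/*
 * Generic fsync helper for pNFS: flush outstanding commits and, unless
 * this is a datasync-only request, send a LAYOUTCOMMIT to the MDS.
 * Returns 0 immediately when no layoutcommit is outstanding.
 */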
int
pnfs_nfs_generic_sync(struct inode *inode, bool datasync)
{
	int ret;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	if (ret < 0)
		return ret;
	if (datasync)
		return 0;
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_nfs_generic_sync);