// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

#include "trace.h"

#define NLMDBG_FACILITY		NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT	(5*HZ)
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)
#define NLMCLNT_MAX_RETRIES	3

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int	nlm_stat_to_errno(__be32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int	nlmclnt_cancel(struct nlm_host *, int, struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static atomic_t	nlm_cookie = ATOMIC_INIT(0x1234);

void nlmclnt_next_cookie(struct nlm_cookie *c)
{
	u32	cookie = atomic_inc_return(&nlm_cookie);

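	/*
	 * The cookie is just the counter value in host byte order; NLM
	 * cookies are opaque to the server, so it only needs to be
	 * unique per host, not portable across architectures.
	 */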
	memcpy(c->data, &cookie, 4);
	c->len = 4;
}

static struct nlm_lockowner *
nlmclnt_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

static void nlmclnt_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmclnt_release_host(lockowner->host);
	kfree(lockowner);
}

static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}

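/*
 * Pick the next per-host "pid" (NLM svid) that no existing lockowner
 * is using.  Called with host->h_lock held, which protects both
 * h_pidcount and the h_lockowners list.
 */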
static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
	uint32_t res;
	do {
		res = host->h_pidcount++;
	} while (nlm_pidbusy(host, res) < 0);
	return res;
}

static struct nlm_lockowner *__nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlmclnt_get_lockowner(lockowner);
	}
	return NULL;
}

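/*
 * Look up (or create) the lockowner for @owner.  The allocation is
 * done with h_lock dropped, so the list must be re-checked afterwards
 * in case another task raced in and inserted the same owner first.
 */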
static struct nlm_lockowner *nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmclnt_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmclnt_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			refcount_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args	*argp = &req->a_args;
	struct nlm_lock	*lock = &argp->lock;
	char *nodename = req->a_host->h_rpcclnt->cl_nodename;

	nlmclnt_next_cookie(&argp->cookie);
	memcpy(&lock->fh, NFS_FH(file_inode(fl->c.flc_file)),
	       sizeof(struct nfs_fh));
	lock->caller  = nodename;
	lock->oh.data = req->a_owner;
	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
				nodename);
	lock->svid = fl->fl_u.nfs_fl.owner->pid;
	lock->fl.fl_start = fl->fl_start;
	lock->fl.fl_end = fl->fl_end;
	lock->fl.c.flc_type = fl->c.flc_type;
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
	WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL);
}

/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 * @data: address of data to be sent to callback operations
 *
 * Return: %0 on success, or a negative errno value on failure.
 */
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data)
{
	struct nlm_rqst		*call;
	int			status;
	const struct nlmclnt_operations *nlmclnt_ops = host->h_nlmclnt_ops;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return -ENOMEM;

	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_alloc_call)
		nlmclnt_ops->nlmclnt_alloc_call(data);

	nlmclnt_locks_init_private(fl, host);
	if (!fl->fl_u.nfs_fl.owner) {
		/* lockowner allocation has failed */
		nlmclnt_release_call(call);
		return -ENOMEM;
	}
	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);
	call->a_callback_data = data;

	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->c.flc_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;
	fl->fl_ops->fl_release_private(fl);
	fl->fl_ops = NULL;

	dprintk("lockd: clnt proc returns %d\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);
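
/*
 * Illustrative sketch only (not part of the NFS client): a caller that
 * already holds an nlm_host reference from nlmclnt_init() would issue
 * a blocking write lock roughly as
 *
 *	fl->c.flc_type = F_WRLCK;
 *	status = nlmclnt_proc(host, F_SETLKW, fl, NULL);
 *
 * with @fl otherwise filled in by the VFS locking code.
 */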

/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
	struct nlm_rqst	*call;

	for (;;) {
		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (call != NULL) {
			refcount_set(&call->a_count, 1);
			locks_init_lock(&call->a_args.lock.fl);
			locks_init_lock(&call->a_res.lock.fl);
			call->a_host = nlm_get_host(host);
			return call;
		}
		if (signalled())
			break;
		printk(KERN_WARNING "nlm_alloc_call: failed, waiting for memory\n");
		schedule_timeout_interruptible(5*HZ);
	}
	return NULL;
}

void nlmclnt_release_call(struct nlm_rqst *call)
{
	const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops;

	if (!refcount_dec_and_test(&call->a_count))
		return;
	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call)
		nlmclnt_ops->nlmclnt_release_call(call->a_callback_data);
	nlmclnt_release_host(call->a_host);
	nlmclnt_release_lockargs(call);
	kfree(call);
}

static void nlmclnt_rpc_release(void *data)
{
	nlmclnt_release_call(data);
}

static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}

/*
 * Generic synchronous NLM call, retried for as long as the server is
 * in its grace period
 */
static int
nlmclnt_call(const struct cred *cred, struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
		.rpc_cred	= cred,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled() ? -EINTR : status;
			default:
				break;
			}
			break;
		} else
		if (resp->status == nlm_lck_denied_grace_period) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
				     "lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n",
				ntohl(resp->status));
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}

/*
 * Generic NLM call, async version.
 */
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct rpc_task_setup task_setup_data = {
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = req,
		.flags = RPC_TASK_ASYNC,
	};

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL)
		goto out_err;
	msg->rpc_proc = &clnt->cl_procinfo[proc];
	task_setup_data.rpc_client = clnt;

	/* bootstrap and kick off the async RPC call */
	return rpc_run_task(&task_setup_data);
out_err:
	tk_ops->rpc_release(req);
	return ERR_PTR(-ENOLCK);
}

static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;

	task = __nlm_async_call(req, proc, msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/*
 * NLM asynchronous call.
 */
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}
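
/*
 * Note: nlm_async_call() and nlm_async_reply() carry the fire-and-forget
 * message-style procedures (the NLM *_MSG and *_RES variants), where any
 * answer from the peer arrives later as a separate RPC rather than as
 * the reply to this call.
 */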

/*
 * NLM client asynchronous call.
 *
 * Note that although the calls are asynchronous, and are therefore
 *      guaranteed to complete, we still always attempt to wait for
 *      completion in order to be able to correctly track the lock
 *      state.
 */
static int nlmclnt_async_call(const struct cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
		.rpc_cred	= cred,
	};
	struct rpc_task *task;
	int err;

	task = __nlm_async_call(req, proc, &msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	err = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return err;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int	status;

	status = nlmclnt_call(nfs_file_cred(fl->c.flc_file), req,
			      NLMPROC_TEST);
	if (status < 0)
		goto out;

	switch (req->a_res.status) {
		case nlm_granted:
			fl->c.flc_type = F_UNLCK;
			break;
		case nlm_lck_denied:
			/*
			 * Report the conflicting lock back to the application.
			 */
			fl->fl_start = req->a_res.lock.fl.fl_start;
			fl->fl_end = req->a_res.lock.fl.fl_end;
			fl->c.flc_type = req->a_res.lock.fl.c.flc_type;
			fl->c.flc_pid = -req->a_res.lock.fl.c.flc_pid;
			break;
		default:
			status = nlm_stat_to_errno(req->a_res.status);
	}
out:
	trace_nlmclnt_test(&req->a_args.lock,
			   (const struct sockaddr *)&req->a_host->h_addr,
			   req->a_host->h_addrlen, req->a_res.status);
	nlmclnt_release_call(req);
	return status;
}

static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
	new->fl_u.nfs_fl.owner = nlmclnt_get_lockowner(fl->fl_u.nfs_fl.owner);
	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	list_del(&fl->fl_u.nfs_fl.list);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	nlmclnt_put_lockowner(fl->fl_u.nfs_fl.owner);
}

static const struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.owner = nlmclnt_find_lockowner(host,
						       fl->c.flc_owner);
	INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
	fl->fl_ops = &nlmclnt_lock_ops;
}

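/*
 * Apply @fl to the local VFS lock state.  Callers in this file tweak
 * fl->c.flc_flags around this call: FL_ACCESS to merely probe for
 * conflicts, FL_SLEEP when a granted lock must be recorded, and
 * FL_EXISTS when unlocking.
 */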
static int do_vfs_lock(struct file_lock *fl)
{
	return locks_lock_file_wait(fl->c.flc_file, fl);
}

/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A:	Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	const struct cred *cred = nfs_file_cred(fl->c.flc_file);
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	struct nlm_wait block;
	unsigned char flags = fl->c.flc_flags;
	unsigned char type;
	__be32 b_status;
	int status = -ENOLCK;

	if (nsm_monitor(host) < 0)
		goto out;
	req->a_args.state = nsm_local_state;

	fl->c.flc_flags |= FL_ACCESS;
	status = do_vfs_lock(fl);
	fl->c.flc_flags = flags;
	if (status < 0)
		goto out;

	nlmclnt_prepare_block(&block, host, fl);
again:
	/*
	 * Initialise resp->status to a valid non-zero value,
	 * since 0 == nlm_lck_granted
	 */
	resp->status = nlm_lck_blocked;

	/*
	 * A GRANTED callback can come at any time -- even before the reply
	 * to the LOCK request arrives, so we queue the wait before
	 * requesting the lock.
	 */
	nlmclnt_queue_block(&block);
	for (;;) {
		/* Reboot protection */
		fl->fl_u.nfs_fl.state = host->h_state;
		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
		if (status < 0)
			break;
		/* Did a reclaimer thread notify us of a server reboot? */
		if (resp->status == nlm_lck_denied_grace_period)
			continue;
		if (resp->status != nlm_lck_blocked)
			break;
		/* Wait on an NLM blocking lock */
		status = nlmclnt_wait(&block, req, NLMCLNT_POLL_TIMEOUT);
		if (status < 0)
			break;
		if (block.b_status != nlm_lck_blocked)
			break;
	}
	b_status = nlmclnt_dequeue_block(&block);
	if (resp->status == nlm_lck_blocked)
		resp->status = b_status;

	/* if we were interrupted while blocking, then cancel the lock request
	 * and exit
	 */
	if (resp->status == nlm_lck_blocked) {
		if (!req->a_args.block)
			goto out_unlock;
		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
			goto out;
	}

	if (resp->status == nlm_granted) {
		down_read(&host->h_rwsem);
		/* Check whether or not the server has rebooted */
		if (fl->fl_u.nfs_fl.state != host->h_state) {
			up_read(&host->h_rwsem);
			goto again;
		}
		/* Ensure the resulting lock will get added to granted list */
		fl->c.flc_flags |= FL_SLEEP;
		if (do_vfs_lock(fl) < 0)
			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
		up_read(&host->h_rwsem);
		fl->c.flc_flags = flags;
		status = 0;
	}
	if (status < 0)
		goto out_unlock;
	/*
	 * EAGAIN doesn't make sense for sleeping locks, and in some
	 * cases NLM_LCK_DENIED is returned for a permanent error.  So
	 * turn it into an ENOLCK.
	 */
	if (resp->status == nlm_lck_denied && (flags & FL_SLEEP))
		status = -ENOLCK;
	else
		status = nlm_stat_to_errno(resp->status);
out:
	trace_nlmclnt_lock(&req->a_args.lock,
			   (const struct sockaddr *)&req->a_host->h_addr,
			   req->a_host->h_addrlen, req->a_res.status);
	nlmclnt_release_call(req);
	return status;
out_unlock:
	/* Fatal error: ensure that we remove the lock altogether */
	trace_nlmclnt_lock(&req->a_args.lock,
			   (const struct sockaddr *)&req->a_host->h_addr,
			   req->a_host->h_addrlen, req->a_res.status);
	dprintk("lockd: lock attempt ended in fatal error.\n"
		"       Attempting to unlock.\n");
	type = fl->c.flc_type;
	fl->c.flc_type = F_UNLCK;
	down_read(&host->h_rwsem);
	do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->c.flc_type = type;
	fl->c.flc_flags = flags;
	nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
		struct nlm_rqst *req)
{
	int		status;

	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host  = host;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	status = nlmclnt_call(nfs_file_cred(fl->c.flc_file), req,
			      NLMPROC_LOCK);
	if (status >= 0 && req->a_res.status == nlm_granted)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
				"(errno %d, status %d)\n",
				fl->c.flc_pid,
				status, ntohl(req->a_res.status));

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a.	Ignore the problem
	 *  b.	Send the owning process some signal (Linux doesn't have
	 *	SIGLOST, though...)
	 *  c.	Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	int status;
	unsigned char flags = fl->c.flc_flags;

	/*
	 * Note: the server is supposed to either grant us the unlock
	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
	 * case, we want to unlock.
	 */
	fl->c.flc_flags |= FL_EXISTS;
	down_read(&host->h_rwsem);
	status = do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->c.flc_flags = flags;
	if (status == -ENOENT) {
		status = 0;
		goto out;
	}

	refcount_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->c.flc_file), req,
				    NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	if (status < 0)
		goto out;

	if (resp->status == nlm_granted)
		goto out;
	if (resp->status != nlm_lck_denied_nolocks)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n",
			ntohl(resp->status));
	/* What to do now? I'm out of my depth... */
	status = -ENOLCK;
out:
	trace_nlmclnt_unlock(&req->a_args.lock,
			     (const struct sockaddr *)&req->a_host->h_addr,
			     req->a_host->h_addrlen, req->a_res.status);
	nlmclnt_release_call(req);
	return status;
}

static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	const struct nlmclnt_operations *nlmclnt_ops = req->a_host->h_nlmclnt_ops;
	bool defer_call = false;

	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_unlock_prepare)
		defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data);

	if (!defer_call)
		rpc_call_start(task);
}

static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_SIGNALLED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		switch (task->tk_status) {
		case -EACCES:
		case -EIO:
			goto die;
		default:
			goto retry_rebind;
		}
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	return;
 retry_rebind:
	nlm_rebind_host(req->a_host);
 retry_unlock:
	rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
	.rpc_call_prepare = nlmclnt_unlock_prepare,
	.rpc_call_done = nlmclnt_unlock_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
	struct nlm_rqst	*req;
	int status;

	dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
		"       Attempting to cancel lock.\n");

	req = nlm_alloc_call(host);
	if (!req)
		return -ENOMEM;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);
	req->a_args.block = block;

	refcount_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->c.flc_file), req,
				    NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	if (status == 0 && req->a_res.status == nlm_lck_denied)
		status = -ENOLCK;
	nlmclnt_release_call(req);
	return status;
}

static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_SIGNALLED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
					task->tk_status);
		goto retry_cancel;
	}

	switch (status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
	case NLM_LCK_DENIED:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			status);
	}

die:
	return;

retry_cancel:
	/* Don't ever retry more than 3 times */
	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
		goto die;
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}

static const struct rpc_call_ops nlmclnt_cancel_ops = {
	.rpc_call_done = nlmclnt_cancel_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
	switch (ntohl(status)) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	printk(KERN_NOTICE "lockd: unexpected server status %d\n",
		 ntohl(status));
	return -ENOLCK;
}