/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side support for the NFSv4 state model.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static LIST_HEAD(nfs4_clientid_list);

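/*
 * nfs4_init_clientid(): establish a clientid using the two-step
 * SETCLIENTID/SETCLIENTID_CONFIRM handshake, advertising the callback
 * port that matches the transport's address family, then start the
 * lease renewal timer.
 */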
int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct nfs4_setclientid_res clid;
	unsigned short port;
	int status;

	port = nfs_callback_tcpport;
	if (clp->cl_addr.ss_family == AF_INET6)
		port = nfs_callback_tcpport6;

	status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
	if (status != 0)
		goto out;
	status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
	if (status != 0)
		goto out;
	clp->cl_clientid = clid.clientid;
	nfs4_schedule_state_renewal(clp);
out:
	return status;
}

struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
	struct rpc_cred *cred = NULL;

	if (clp->cl_machine_cred != NULL)
		cred = get_rpccred(clp->cl_machine_cred);
	return cred;
}

static void nfs4_clear_machine_cred(struct nfs_client *clp)
{
	struct rpc_cred *cred;

	spin_lock(&clp->cl_lock);
	cred = clp->cl_machine_cred;
	clp->cl_machine_cred = NULL;
	spin_unlock(&clp->cl_lock);
	if (cred != NULL)
		put_rpccred(cred);
}

struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct rpc_cred *cred = NULL;

	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		if (list_empty(&sp->so_states))
			continue;
		cred = get_rpccred(sp->so_cred);
		break;
	}
	return cred;
}

#if defined(CONFIG_NFS_V4_1)

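/*
 * nfs41_setup_state_renewal(): fetch the lease time from the server,
 * cache it (converted to jiffies) under cl_lock, then kick off the
 * lease renewal timer.
 */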
static int nfs41_setup_state_renewal(struct nfs_client *clp)
{
	int status;
	struct nfs_fsinfo fsinfo;

	status = nfs4_proc_get_lease_time(clp, &fsinfo);
	if (status == 0) {
		/* Update lease time and schedule renewal */
		spin_lock(&clp->cl_lock);
		clp->cl_lease_time = fsinfo.lease_time * HZ;
		clp->cl_last_renewal = jiffies;
		spin_unlock(&clp->cl_lock);

		nfs4_schedule_state_renewal(clp);
	}

	return status;
}

static void nfs4_end_drain_session(struct nfs_client *clp)
{
	struct nfs4_session *ses = clp->cl_session;
	int max_slots;

	if (ses == NULL)
		return;
	if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
		spin_lock(&ses->fc_slot_table.slot_tbl_lock);
		max_slots = ses->fc_slot_table.max_slots;
		while (max_slots--) {
			struct rpc_task *task;

			task = rpc_wake_up_next(&ses->fc_slot_table.
						slot_tbl_waitq);
			if (!task)
				break;
			rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
		}
		spin_unlock(&ses->fc_slot_table.slot_tbl_lock);
	}
}

static int nfs4_begin_drain_session(struct nfs_client *clp)
{
	struct nfs4_session *ses = clp->cl_session;
	struct nfs4_slot_table *tbl = &ses->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
	if (tbl->highest_used_slotid != -1) {
		INIT_COMPLETION(ses->complete);
		spin_unlock(&tbl->slot_tbl_lock);
		return wait_for_completion_interruptible(&ses->complete);
	}
	spin_unlock(&tbl->slot_tbl_lock);
	return 0;
}

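/*
 * nfs41_init_clientid(): NFSv4.1 analogue of nfs4_init_clientid().
 * The session is drained first so that nothing races with the
 * EXCHANGE_ID/CREATE_SESSION sequence that (re-)establishes the
 * clientid and the session.
 */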
int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
	int status;

	nfs4_begin_drain_session(clp);
	status = nfs4_proc_exchange_id(clp, cred);
	if (status != 0)
		goto out;
	status = nfs4_proc_create_session(clp);
	if (status != 0)
		goto out;
	nfs41_setup_state_renewal(clp);
	nfs_mark_client_ready(clp, NFS_CS_READY);
out:
	return status;
}

struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
	struct rpc_cred *cred;

	spin_lock(&clp->cl_lock);
	cred = nfs4_get_machine_cred_locked(clp);
	spin_unlock(&clp->cl_lock);
	return cred;
}

#endif /* CONFIG_NFS_V4_1 */

struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct rpc_cred *cred;

	spin_lock(&clp->cl_lock);
	cred = nfs4_get_machine_cred_locked(clp);
	if (cred != NULL)
		goto out;
	pos = rb_first(&clp->cl_state_owners);
	if (pos != NULL) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		cred = get_rpccred(sp->so_cred);
	}
out:
	spin_unlock(&clp->cl_lock);
	return cred;
}

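/*
 * nfs_alloc_unique_id(): assign 'new' a random id in [minval, 2^maxbits)
 * and insert it into the rb-tree.  On a collision, probe linearly
 * upwards from the colliding node (wrapping back to minval) until a
 * free id is found, then retry the insertion.
 */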
static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
		__u64 minval, int maxbits)
{
	struct rb_node **p, *parent;
	struct nfs_unique_id *pos;
	__u64 mask = ~0ULL;

	if (maxbits < 64)
		mask = (1ULL << maxbits) - 1ULL;

	/* Ensure distribution is more or less flat */
	get_random_bytes(&new->id, sizeof(new->id));
	new->id &= mask;
	if (new->id < minval)
		new->id += minval;
retry:
	p = &root->rb_node;
	parent = NULL;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct nfs_unique_id, rb_node);

		if (new->id < pos->id)
			p = &(*p)->rb_left;
		else if (new->id > pos->id)
			p = &(*p)->rb_right;
		else
			goto id_exists;
	}
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);
	return;
id_exists:
	for (;;) {
		new->id++;
		if (new->id < minval || (new->id & mask) != new->id) {
			new->id = minval;
			break;
		}
		parent = rb_next(parent);
		if (parent == NULL)
			break;
		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
		if (new->id < pos->id)
			break;
	}
	goto retry;
}

static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
{
	rb_erase(&id->rb_node, root);
}

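/*
 * nfs4_find_state_owner(): the cl_state_owners rb-tree is keyed on the
 * (so_server, so_cred) pointer pair.  Returns a referenced state owner
 * on a match, or NULL.  The caller must hold clp->cl_lock.
 */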
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs_client *clp = server->nfs_client;
	struct rb_node **p = &clp->cl_state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp, *res = NULL;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

		if (server < sp->so_server) {
			p = &parent->rb_left;
			continue;
		}
		if (server > sp->so_server) {
			p = &parent->rb_right;
			continue;
		}
		if (cred < sp->so_cred)
			p = &parent->rb_left;
		else if (cred > sp->so_cred)
			p = &parent->rb_right;
		else {
			atomic_inc(&sp->so_count);
			res = sp;
			break;
		}
	}
	return res;
}

static struct nfs4_state_owner *
nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
{
	struct rb_node **p = &clp->cl_state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

		if (new->so_server < sp->so_server) {
			p = &parent->rb_left;
			continue;
		}
		if (new->so_server > sp->so_server) {
			p = &parent->rb_right;
			continue;
		}
		if (new->so_cred < sp->so_cred)
			p = &parent->rb_left;
		else if (new->so_cred > sp->so_cred)
			p = &parent->rb_right;
		else {
			atomic_inc(&sp->so_count);
			return sp;
		}
	}
	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
	rb_link_node(&new->so_client_node, parent, p);
	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
	return new;
}

static void
nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
{
	if (!RB_EMPTY_NODE(&sp->so_client_node))
		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_NOFS);
	if (!sp)
		return NULL;
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}

static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
		struct nfs_client *clp = sp->so_server->nfs_client;

		spin_lock(&clp->cl_lock);
		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
		RB_CLEAR_NODE(&sp->so_client_node);
		spin_unlock(&clp->cl_lock);
	}
}

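/*
 * nfs4_get_state_owner(): look up (or create) the state owner for a
 * given (server, credential) pair.  The allocation is done outside
 * cl_lock, so nfs4_insert_state_owner() repeats the lookup to resolve
 * races; the loser of the race frees its unused allocation.  A minimal
 * usage sketch:
 *
 *	sp = nfs4_get_state_owner(server, cred);
 *	if (sp == NULL)
 *		return -ENOMEM;
 *	...
 *	nfs4_put_state_owner(sp);
 */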
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *new;

	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(server, cred);
	spin_unlock(&clp->cl_lock);
	if (sp != NULL)
		return sp;
	new = nfs4_alloc_state_owner();
	if (new == NULL)
		return NULL;
	new->so_server = server;
	new->so_cred = cred;
	spin_lock(&clp->cl_lock);
	sp = nfs4_insert_state_owner(clp, new);
	spin_unlock(&clp->cl_lock);
	if (sp == new)
		get_rpccred(cred);
	else {
		rpc_destroy_wait_queue(&new->so_sequence.wait);
		kfree(new);
	}
	return sp;
}

void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs_client *clp = sp->so_server->nfs_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	nfs4_remove_state_owner(clp, sp);
	spin_unlock(&clp->cl_lock);
	rpc_destroy_wait_queue(&sp->so_sequence.wait);
	put_rpccred(cred);
	kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kzalloc(sizeof(*state), GFP_NOFS);
	if (!state)
		return NULL;
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	seqlock_init(&state->seqlock);
	return state;
}

void
nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
{
	if (state->state == fmode)
		return;
	/* NB! List reordering - see the reclaim code for why.  */
	if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (fmode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	state->state = fmode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		if (state->owner != owner)
			continue;
		if (atomic_inc_not_zero(&state->count))
			return state;
	}
	return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}

struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}

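/*
 * nfs4_put_open_state(): drop a reference to the open state.  The last
 * reference unhashes the state under owner->so_lock and inode->i_lock
 * (taken in that order, matching nfs4_get_open_state()) and releases
 * the inode and state owner references taken at creation time.
 */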
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
 */
static void __nfs4_close(struct path *path, struct nfs4_state *state,
		fmode_t fmode, gfp_t gfp_mask, int wait)
{
	struct nfs4_state_owner *owner = state->owner;
	int call_close = 0;
	fmode_t newstate;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
		case FMODE_READ:
			state->n_rdonly--;
			break;
		case FMODE_WRITE:
			state->n_wronly--;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr--;
	}
	newstate = FMODE_READ|FMODE_WRITE;
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0) {
			newstate &= ~FMODE_READ;
			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (state->n_wronly == 0) {
			newstate &= ~FMODE_WRITE;
			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (newstate == 0)
			clear_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	nfs4_state_set_mode_locked(state, newstate);
	spin_unlock(&owner->so_lock);

	if (!call_close) {
		nfs4_put_open_state(state);
		nfs4_put_state_owner(owner);
	} else
		nfs4_do_close(path, state, gfp_mask, wait);
}

void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
	__nfs4_close(path, state, fmode, GFP_NOFS, 0);
}

void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
	__nfs4_close(path, state, fmode, GFP_KERNEL, 1);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
{
	struct nfs4_lock_state *pos;
	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (type != NFS4_ANY_LOCK_TYPE && pos->ls_owner.lo_type != type)
			continue;
		switch (pos->ls_owner.lo_type) {
		case NFS4_POSIX_LOCK_TYPE:
			if (pos->ls_owner.lo_u.posix_owner != fl_owner)
				continue;
			break;
		case NFS4_FLOCK_LOCK_TYPE:
			if (pos->ls_owner.lo_u.flock_owner != fl_pid)
				continue;
		}
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}

/*
 * Allocate a new lock_state structure for the given lock owner, and
 * assign it a unique lockowner id from the per-client rb-tree.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
{
	struct nfs4_lock_state *lsp;
	struct nfs_client *clp = state->owner->so_server->nfs_client;

	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
	if (lsp == NULL)
		return NULL;
	rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
	spin_lock_init(&lsp->ls_sequence.lock);
	INIT_LIST_HEAD(&lsp->ls_sequence.list);
	lsp->ls_seqid.sequence = &lsp->ls_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_state = state;
	lsp->ls_owner.lo_type = type;
	switch (lsp->ls_owner.lo_type) {
	case NFS4_FLOCK_LOCK_TYPE:
		lsp->ls_owner.lo_u.flock_owner = fl_pid;
		break;
	case NFS4_POSIX_LOCK_TYPE:
		lsp->ls_owner.lo_u.posix_owner = fl_owner;
		break;
	default:
		kfree(lsp);
		return NULL;
	}
	spin_lock(&clp->cl_lock);
	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}

static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client;

	spin_lock(&clp->cl_lock);
	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
	spin_unlock(&clp->cl_lock);
	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
	kfree(lsp);
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner, pid_t pid, unsigned int type)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner, pid, type);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner, pid, type);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	if (new != NULL)
		nfs4_free_lock_state(new);
	return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	if (lsp->ls_flags & NFS_LOCK_INITIALIZED)
		nfs4_release_lockowner(lsp);
	nfs4_free_lock_state(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static const struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};

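/*
 * nfs4_set_lock_state(): attach an nfs4_lock_state to a file_lock.
 * POSIX locks are keyed on fl_owner, flock locks on fl_pid.  The
 * fl_ops hooks above keep the lock state's refcount in step with
 * copies of the file_lock.
 */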
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	if (fl->fl_flags & FL_POSIX)
		lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE);
	else if (fl->fl_flags & FL_FLOCK)
		lsp = nfs4_get_lock_state(state, 0, fl->fl_pid, NFS4_FLOCK_LOCK_TYPE);
	else
		return -EINVAL;
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid)
{
	struct nfs4_lock_state *lsp;
	int seq;

	do {
		seq = read_seqbegin(&state->seqlock);
		memcpy(dst, &state->stateid, sizeof(*dst));
	} while (read_seqretry(&state->seqlock, seq));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}

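/*
 * Seqid handling: OPEN-family and LOCK-family operations must be
 * serialized per seqid counter.  nfs_wait_on_sequence() queues an
 * nfs_seqid on its rpc_sequence list and only lets the list head
 * proceed; releasing a seqid wakes up the next waiter.
 */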
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
{
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), gfp_mask);
	if (new != NULL) {
		new->sequence = counter;
		INIT_LIST_HEAD(&new->list);
	}
	return new;
}

void nfs_release_seqid(struct nfs_seqid *seqid)
{
	if (!list_empty(&seqid->list)) {
		struct rpc_sequence *sequence = seqid->sequence->sequence;

		spin_lock(&sequence->lock);
		list_del_init(&seqid->list);
		spin_unlock(&sequence->lock);
		rpc_wake_up(&sequence->wait);
	}
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
	nfs_release_seqid(seqid);
	kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see the comments for seqid_mutating_error() in nfs_fs.h
 */
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
	switch (status) {
		case 0:
			break;
		case -NFS4ERR_BAD_SEQID:
			if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
				return;
			printk(KERN_WARNING "NFS: v4 server returned a bad"
					" sequence-id error on an"
					" unconfirmed sequence %p!\n",
					seqid->sequence);
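			/* Fall through: don't bump the seqid */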
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_BADXDR:
		case -NFS4ERR_RESOURCE:
		case -NFS4ERR_NOFILEHANDLE:
			/* Non-seqid mutating errors */
			return;
	}
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	struct nfs4_state_owner *sp = container_of(seqid->sequence,
					struct nfs4_state_owner, so_seqid);
	struct nfs_server *server = sp->so_server;

	if (status == -NFS4ERR_BAD_SEQID)
		nfs4_drop_state_owner(sp);
	if (!nfs4_has_session(server->nfs_client))
		nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see the comments for seqid_mutating_error() in nfs_fs.h
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	nfs_increment_seqid(status, seqid);
}

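/*
 * nfs_wait_on_sequence(): returns 0 if this seqid is at the head of
 * the queue and the RPC may proceed, or -EAGAIN after putting the
 * task to sleep on the sequence waitqueue.
 */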
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	spin_lock(&sequence->lock);
	if (list_empty(&seqid->list))
		list_add_tail(&seqid->list, &sequence->list);
	if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
		goto unlock;
	rpc_sleep_on(&sequence->wait, task, NULL);
	status = -EAGAIN;
unlock:
	spin_unlock(&sequence->lock);
	return status;
}

static int nfs4_run_state_manager(void *);

static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
{
	smp_mb__before_clear_bit();
	clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule the nfs_client asynchronous state management routine
 */
void nfs4_schedule_state_manager(struct nfs_client *clp)
{
	struct task_struct *task;

	if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
		return;
	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs4_run_state_manager, clp, "%s-manager",
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_ADDR));
	if (!IS_ERR(task))
		return;
	nfs4_clear_state_manager_bit(clp);
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs_client *clp)
{
	if (!clp)
		return;
	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
		set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
	nfs4_schedule_state_manager(clp);
}

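/*
 * Two recovery variants: "reboot" reclaim re-establishes state inside
 * the server's grace period after a restart, while "nograce" recovery
 * re-opens and re-locks from scratch when state has expired or been
 * revoked.  The helpers below flag individual stateids, their owners
 * and the client for the appropriate variant.
 */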
int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
{
	set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
	/* Don't recover state that expired before the reboot */
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
		clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
		return 0;
	}
	set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
	set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
	return 1;
}

int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
{
	set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
	clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
	set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
	set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
	return 1;
}

static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
{
	struct inode *inode = state->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct file_lock *fl;
	int status = 0;

	if (inode->i_flock == NULL)
		return 0;

	/* Guard against delegation returns and new lock/unlock calls */
	down_write(&nfsi->rwsem);
	/* Protect inode->i_flock using the BKL */
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (nfs_file_open_context(fl->fl_file)->state != state)
			continue;
		unlock_kernel();
		status = ops->recover_lock(state, fl);
		switch (status) {
			case 0:
				break;
			case -ESTALE:
			case -NFS4ERR_ADMIN_REVOKED:
			case -NFS4ERR_STALE_STATEID:
			case -NFS4ERR_BAD_STATEID:
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_STALE_CLIENTID:
			case -NFS4ERR_BADSESSION:
			case -NFS4ERR_BADSLOT:
			case -NFS4ERR_BAD_HIGH_SLOT:
			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
				goto out;
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__func__, status);
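				/* Fall through */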
			case -ENOMEM:
			case -NFS4ERR_DENIED:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
				status = 0;
		}
		lock_kernel();
	}
	unlock_kernel();
out:
	up_write(&nfsi->rwsem);
	return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
restart:
	spin_lock(&sp->so_lock);
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
			continue;
		if (state->state == 0)
			continue;
		atomic_inc(&state->count);
		spin_unlock(&sp->so_lock);
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(state, ops);
			if (status >= 0) {
				list_for_each_entry(lock, &state->lock_states, ls_locks) {
					if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
						printk("%s: Lock reclaim failed!\n",
							__func__);
				}
				nfs4_put_open_state(state);
				goto restart;
			}
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__func__, status);
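				/* Fall through */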
			case -ENOENT:
			case -ENOMEM:
			case -ESTALE:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_ADMIN_REVOKED:
			case -NFS4ERR_STALE_STATEID:
			case -NFS4ERR_BAD_STATEID:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
				nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
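				/* Fall through */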
			case -NFS4ERR_STALE_CLIENTID:
			case -NFS4ERR_BADSESSION:
			case -NFS4ERR_BADSLOT:
			case -NFS4ERR_BAD_HIGH_SLOT:
			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
				goto out_err;
		}
		nfs4_put_open_state(state);
		goto restart;
	}
	spin_unlock(&sp->so_lock);
	return 0;
out_err:
	nfs4_put_open_state(state);
	return status;
}

static void nfs4_clear_open_state(struct nfs4_state *state)
{
	struct nfs4_lock_state *lock;

	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	list_for_each_entry(lock, &state->lock_states, ls_locks) {
		lock->ls_seqid.flags = 0;
		lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
	}
}

static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state *state;

	/* Reset all sequence ids to zero */
	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			if (mark_reclaim(clp, state))
				nfs4_clear_open_state(state);
		}
		spin_unlock(&sp->so_lock);
	}
}

static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
{
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
}

static void nfs4_reclaim_complete(struct nfs_client *clp,
				 const struct nfs4_state_recovery_ops *ops)
{
	/* Notify the server we're done reclaiming our state */
	if (ops->reclaim_complete)
		(void)ops->reclaim_complete(clp);
}

static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state *state;

	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
		return 0;

	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
				continue;
			nfs4_state_mark_reclaim_nograce(clp, state);
		}
		spin_unlock(&sp->so_lock);
	}

	nfs_delegation_reap_unclaimed(clp);
	return 1;
}

static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
	if (!nfs4_state_clear_reclaim_reboot(clp))
		return;
	nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);
}

static void nfs_delegation_clear_all(struct nfs_client *clp)
{
	nfs_delegation_mark_reclaim(clp);
	nfs_delegation_reap_unclaimed(clp);
}

static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
{
	nfs_delegation_clear_all(clp);
	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
}

static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
	switch (error) {
		case -NFS4ERR_CB_PATH_DOWN:
			nfs_handle_cb_pathdown(clp);
			return 0;
		case -NFS4ERR_NO_GRACE:
			nfs4_state_end_reclaim_reboot(clp);
			return 0;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_LEASE_MOVED:
			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
			nfs4_state_clear_reclaim_reboot(clp);
			nfs4_state_start_reclaim_reboot(clp);
			break;
		case -NFS4ERR_EXPIRED:
			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
			nfs4_state_start_reclaim_nograce(clp);
			break;
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
			set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
			/* Zero session reset errors */
			return 0;
	}
	return error;
}

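/*
 * nfs4_do_reclaim(): walk the state owners flagged for this recovery
 * variant.  cl_lock must be dropped around the (blocking) reclaim of
 * each owner, so the walk restarts from the top of the rb-tree after
 * every owner; the owner_flag_bit test keeps the loop from revisiting
 * owners that have already been handled.
 */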
static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
{
	struct rb_node *pos;
	int status = 0;

restart:
	spin_lock(&clp->cl_lock);
	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
			continue;
		atomic_inc(&sp->so_count);
		spin_unlock(&clp->cl_lock);
		status = nfs4_reclaim_open_state(sp, ops);
		if (status < 0) {
			set_bit(ops->owner_flag_bit, &sp->so_flags);
			nfs4_put_state_owner(sp);
			return nfs4_recovery_handle_error(clp, status);
		}
		nfs4_put_state_owner(sp);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
	return status;
}

static int nfs4_check_lease(struct nfs_client *clp)
{
	struct rpc_cred *cred;
	const struct nfs4_state_maintenance_ops *ops =
		clp->cl_mvops->state_renewal_ops;
	int status = -NFS4ERR_EXPIRED;

	/* Is the client already known to have an expired lease? */
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
		return 0;
	spin_lock(&clp->cl_lock);
	cred = ops->get_state_renewal_cred_locked(clp);
	spin_unlock(&clp->cl_lock);
	if (cred == NULL) {
		cred = nfs4_get_setclientid_cred(clp);
		if (cred == NULL)
			goto out;
	}
	status = ops->renew_lease(clp, cred);
	put_rpccred(cred);
out:
	return nfs4_recovery_handle_error(clp, status);
}

static int nfs4_reclaim_lease(struct nfs_client *clp)
{
	struct rpc_cred *cred;
	const struct nfs4_state_recovery_ops *ops =
		clp->cl_mvops->reboot_recovery_ops;
	int status = -ENOENT;

	cred = ops->get_clid_cred(clp);
	if (cred != NULL) {
		status = ops->establish_clid(clp, cred);
		put_rpccred(cred);
		/* Handle case where the user hasn't set up machine creds */
		if (status == -EACCES && cred == clp->cl_machine_cred) {
			nfs4_clear_machine_cred(clp);
			status = -EAGAIN;
		}
		if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
			status = -EPROTONOSUPPORT;
	}
	return status;
}

#ifdef CONFIG_NFS_V4_1
void nfs41_handle_recall_slot(struct nfs_client *clp)
{
	set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
	nfs4_schedule_state_recovery(clp);
}

static void nfs4_reset_all_state(struct nfs_client *clp)
{
	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
		clp->cl_boot_time = CURRENT_TIME;
		nfs4_state_start_reclaim_nograce(clp);
		nfs4_schedule_state_recovery(clp);
	}
}

static void nfs41_handle_server_reboot(struct nfs_client *clp)
{
	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
		nfs4_state_start_reclaim_reboot(clp);
		nfs4_schedule_state_recovery(clp);
	}
}

static void nfs41_handle_state_revoked(struct nfs_client *clp)
{
	/* Temporary */
	nfs4_reset_all_state(clp);
}

static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
{
	/* This will need to handle layouts too */
	nfs_expire_all_delegations(clp);
}

static void nfs41_handle_cb_path_down(struct nfs_client *clp)
{
	nfs_expire_all_delegations(clp);
	if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
		nfs4_schedule_state_recovery(clp);
}

void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
{
	if (!flags)
		return;
	else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
		nfs41_handle_server_reboot(clp);
	else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
			    SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
			    SEQ4_STATUS_ADMIN_STATE_REVOKED |
			    SEQ4_STATUS_LEASE_MOVED))
		nfs41_handle_state_revoked(clp);
	else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
		nfs41_handle_recallable_state_revoked(clp);
	else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
			    SEQ4_STATUS_BACKCHANNEL_FAULT |
			    SEQ4_STATUS_CB_PATH_DOWN_SESSION))
		nfs41_handle_cb_path_down(clp);
}

static int nfs4_reset_session(struct nfs_client *clp)
{
	int status;

	nfs4_begin_drain_session(clp);
	status = nfs4_proc_destroy_session(clp->cl_session);
	if (status && status != -NFS4ERR_BADSESSION &&
	    status != -NFS4ERR_DEADSESSION) {
		status = nfs4_recovery_handle_error(clp, status);
		goto out;
	}

	memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
	status = nfs4_proc_create_session(clp);
	if (status) {
		status = nfs4_recovery_handle_error(clp, status);
		goto out;
	}
	/* create_session negotiated new slot table */
	clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);

	/* Let the state manager reestablish state */
	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
		nfs41_setup_state_renewal(clp);
out:
	return status;
}

static int nfs4_recall_slot(struct nfs_client *clp)
{
	struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table;
	struct nfs4_channel_attrs *fc_attrs = &clp->cl_session->fc_attrs;
	struct nfs4_slot *new, *old;
	int i;

	nfs4_begin_drain_session(clp);
	new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
		      GFP_NOFS);
	if (!new)
		return -ENOMEM;

	spin_lock(&fc_tbl->slot_tbl_lock);
	for (i = 0; i < fc_tbl->target_max_slots; i++)
		new[i].seq_nr = fc_tbl->slots[i].seq_nr;
	old = fc_tbl->slots;
	fc_tbl->slots = new;
	fc_tbl->max_slots = fc_tbl->target_max_slots;
	fc_tbl->target_max_slots = 0;
	fc_attrs->max_reqs = fc_tbl->max_slots;
	spin_unlock(&fc_tbl->slot_tbl_lock);

	kfree(old);
	nfs4_end_drain_session(clp);
	return 0;
}

#else /* CONFIG_NFS_V4_1 */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
static void nfs4_end_drain_session(struct nfs_client *clp) { }
static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
#endif /* CONFIG_NFS_V4_1 */

/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
 * on EXCHANGE_ID for v4.1
 */
static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
{
	if (nfs4_has_session(clp)) {
		switch (status) {
		case -NFS4ERR_DELAY:
		case -NFS4ERR_CLID_INUSE:
		case -EAGAIN:
		case -EKEYEXPIRED:
			break;

		case -NFS4ERR_NOT_SAME:
		default:
			return;
		}
	}
	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
}

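/*
 * nfs4_state_manager(): runs in its own kthread and loops until no
 * recovery work remains, handling in order: lease re-establishment,
 * lease validity checks, session reset, reboot reclaim, nograce
 * recovery, delegation returns and slot table recalls.  Any step that
 * leaves NFS4CLNT_LEASE_EXPIRED set restarts the loop from the top.
 */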
static void nfs4_state_manager(struct nfs_client *clp)
{
	int status = 0;

	/* Ensure exclusive access to NFSv4 state */
	for (;;) {
		if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
			/* We're going to have to re-establish a clientid */
			status = nfs4_reclaim_lease(clp);
			if (status) {
				nfs4_set_lease_expired(clp, status);
				if (test_bit(NFS4CLNT_LEASE_EXPIRED,
							&clp->cl_state))
					continue;
				if (clp->cl_cons_state ==
							NFS_CS_SESSION_INITING)
					nfs_mark_client_ready(clp, status);
				goto out_error;
			}
			clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
			set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
		}

		if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
			status = nfs4_check_lease(clp);
			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
				continue;
			if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
				goto out_error;
		}

		/* Initialize or reset the session */
		if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
		   && nfs4_has_session(clp)) {
			status = nfs4_reset_session(clp);
			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
				continue;
			if (status < 0)
				goto out_error;
		}

		/* First recover reboot state... */
		if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
			status = nfs4_do_reclaim(clp,
				clp->cl_mvops->reboot_recovery_ops);
			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
			    test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
				continue;
			nfs4_state_end_reclaim_reboot(clp);
			if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
				continue;
			if (status < 0)
				goto out_error;
		}

		/* Now recover expired state... */
		if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
			status = nfs4_do_reclaim(clp,
				clp->cl_mvops->nograce_recovery_ops);
			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
			    test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
			    test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
				continue;
			if (status < 0)
				goto out_error;
		}

		nfs4_end_drain_session(clp);
		if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
			nfs_client_return_marked_delegations(clp);
			continue;
		}
		/* Recall session slots */
		if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)
		   && nfs4_has_session(clp)) {
			status = nfs4_recall_slot(clp);
			if (status < 0)
				goto out_error;
			continue;
		}

		nfs4_clear_state_manager_bit(clp);
		/* Did we race with an attempt to give us more work? */
		if (clp->cl_state == 0)
			break;
		if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
			break;
	}
	return;
out_error:
	printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
			" with error %d\n", clp->cl_hostname, -status);
	nfs4_end_drain_session(clp);
	nfs4_clear_state_manager_bit(clp);
}

static int nfs4_run_state_manager(void *ptr)
{
	struct nfs_client *clp = ptr;

	allow_signal(SIGKILL);
	nfs4_state_manager(clp);
	nfs_put_client(clp);
	module_put_and_exit(0);
	return 0;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */