Lines Matching refs:clp (only in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/fs/nfs/)

61 static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
63 int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
66 status = nfs4_proc_setclientid_confirm(clp, cred);
68 nfs4_schedule_state_renewal(clp);
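
The matches at lines 61-68 form the clientid establishment sequence: SETCLIENTID, SETCLIENTID_CONFIRM, then lease renewal. A minimal sketch of that call order, assuming the third argument to nfs4_proc_setclientid() is the callback port (nfs_callback_tcpport), which is not visible in the matches:

    /* Sketch only: call order of nfs4_init_client(); the callback port
     * argument is assumed from context. */
    static int nfs4_init_client_sketch(struct nfs_client *clp, struct rpc_cred *cred)
    {
            int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
                            nfs_callback_tcpport, cred);
            if (status == 0)
                    status = nfs4_proc_setclientid_confirm(clp, cred);
            if (status == 0)
                    nfs4_schedule_state_renewal(clp);   /* start the lease renewal timer */
            return status;
    }
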
73 nfs4_alloc_lockowner_id(struct nfs_client *clp)
75 return clp->cl_lockowner_id ++;
79 nfs4_client_grab_unused(struct nfs_client *clp, struct rpc_cred *cred)
83 if (!list_empty(&clp->cl_unused)) {
84 sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
87 list_move(&sp->so_list, &clp->cl_state_owners);
88 clp->cl_nunused--;
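
Lines 73-88 show the open-owner cache: lockowner ids come from a simple per-client counter, and released state owners are parked on cl_unused so they can be handed back out without a fresh allocation. A hedged sketch of the grab path; the so_count/so_cred updates are assumed from context, and the caller is assumed to hold clp->cl_lock:

    /* Sketch of nfs4_client_grab_unused(): reuse a cached owner from
     * cl_unused instead of allocating a new one. */
    static struct nfs4_state_owner *
    grab_unused_sketch(struct nfs_client *clp, struct rpc_cred *cred)
    {
            struct nfs4_state_owner *sp = NULL;

            if (!list_empty(&clp->cl_unused)) {
                    sp = list_entry(clp->cl_unused.next,
                                    struct nfs4_state_owner, so_list);
                    atomic_inc(&sp->so_count);      /* assumed: take a reference */
                    sp->so_cred = cred;             /* assumed field name */
                    list_move(&sp->so_list, &clp->cl_state_owners);
                    clp->cl_nunused--;
            }
            return sp;
    }
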
93 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
98 list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
107 static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
111 if (!list_empty(&clp->cl_state_owners)) {
112 sp = list_entry(clp->cl_state_owners.next,
120 nfs4_find_state_owner(struct nfs_client *clp, struct rpc_cred *cred)
124 list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
129 list_move(&sp->so_list, &clp->cl_state_owners);
163 struct nfs_client *clp = sp->so_client;
164 spin_lock(&clp->cl_lock);
166 spin_unlock(&clp->cl_lock);
170 * Note: must be called with clp->cl_sem held in order to prevent races
175 struct nfs_client *clp = server->nfs_client;
180 spin_lock(&clp->cl_lock);
181 sp = nfs4_find_state_owner(clp, cred);
183 sp = nfs4_client_grab_unused(clp, cred);
185 list_add(&new->so_list, &clp->cl_state_owners);
186 new->so_client = clp;
187 new->so_id = nfs4_alloc_lockowner_id(clp);
192 spin_unlock(&clp->cl_lock);
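
The matches at lines 175-192 outline nfs4_get_state_owner(): with clp->cl_sem held (per the comment at line 170), the lookup runs under clp->cl_lock and falls back from an existing owner, to a cached unused one, to a caller-supplied preallocated owner. A condensed fragment of that fallback chain; allocation and cleanup of 'new' are omitted:

    /* Fragment sketching the lookup order in nfs4_get_state_owner();
     * 'new' is a preallocated owner supplied by the caller. */
    spin_lock(&clp->cl_lock);
    sp = nfs4_find_state_owner(clp, cred);
    if (sp == NULL)
            sp = nfs4_client_grab_unused(clp, cred);
    if (sp == NULL && new != NULL) {
            list_add(&new->so_list, &clp->cl_state_owners);
            new->so_client = clp;
            new->so_id = nfs4_alloc_lockowner_id(clp);
            sp = new;
            new = NULL;
    }
    spin_unlock(&clp->cl_lock);
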
201 * Must be called with clp->cl_sem held in order to avoid races
206 struct nfs_client *clp = sp->so_client;
209 if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
211 if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
215 list_move(&sp->so_list, &clp->cl_unused);
216 clp->cl_nunused++;
217 spin_unlock(&clp->cl_lock);
223 spin_unlock(&clp->cl_lock);
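
The release path at lines 206-223 is the mirror image: atomic_dec_and_lock() takes clp->cl_lock only when the last reference drops, and the owner is then either parked on cl_unused or freed once the pool already holds OPENOWNER_POOL_SIZE entries. A hedged sketch; the free path (credential put and kfree) is assumed rather than shown in the matches:

    /* Sketch of nfs4_put_state_owner(): cache the owner if the pool has
     * room, otherwise free it (free path assumed). */
    static void put_state_owner_sketch(struct nfs4_state_owner *sp)
    {
            struct nfs_client *clp = sp->so_client;

            if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
                    return;                         /* not the final put */
            if (clp->cl_nunused >= OPENOWNER_POOL_SIZE) {
                    list_del(&sp->so_list);
                    spin_unlock(&clp->cl_lock);
                    put_rpccred(sp->so_cred);       /* assumed */
                    kfree(sp);                      /* assumed */
                    return;
            }
            list_move(&sp->so_list, &clp->cl_unused);
            clp->cl_nunused++;
            spin_unlock(&clp->cl_lock);
    }
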
321 * reference to clp->cl_sem!
409 struct nfs_client *clp = state->owner->so_client;
417 spin_lock(&clp->cl_lock);
418 lsp->ls_id = nfs4_alloc_lockowner_id(clp);
419 spin_unlock(&clp->cl_lock);
428 * The caller must be holding clp->cl_sem
621 static inline void nfs4_clear_recover_bit(struct nfs_client *clp)
624 clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
626 wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
627 rpc_wake_up(&clp->cl_rpcwaitq);
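
nfs4_clear_recover_bit() (lines 621-627) wakes both the bit waiters and the client's RPC wait queue once recovery ends. A sketch of that pattern; the memory barrier between clear_bit() and wake_up_bit() is assumed, since wake_up_bit() requires one but it does not appear in the matches:

    /* Sketch: clear the recovery flag, then wake everything waiting on it. */
    static inline void clear_recover_bit_sketch(struct nfs_client *clp)
    {
            clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
            smp_mb__after_clear_bit();      /* assumed barrier before the wakeups */
            wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
            rpc_wake_up(&clp->cl_rpcwaitq);
    }
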
633 static void nfs4_recover_state(struct nfs_client *clp)
638 atomic_inc(&clp->cl_count);
639 task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
640 NIPQUAD(clp->cl_addr.sin_addr));
643 nfs4_clear_recover_bit(clp);
644 nfs_put_client(clp);
651 void nfs4_schedule_state_recovery(struct nfs_client *clp)
653 if (!clp)
655 if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
656 nfs4_recover_state(clp);
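
Lines 633-656 show how recovery is started exactly once: test_and_set_bit() on NFS4CLNT_STATE_RECOVER gates the spawn, an extra cl_count reference keeps the client pinned for the reclaimer thread, and the failure path undoes both. A sketch that folds nfs4_recover_state() and nfs4_schedule_state_recovery() together; the IS_ERR() check on the kthread is assumed from the failure path in the matches:

    /* Sketch of the single-reclaimer scheduling pattern. */
    void schedule_state_recovery_sketch(struct nfs_client *clp)
    {
            struct task_struct *task;

            if (clp == NULL)
                    return;
            if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
                    return;                 /* recovery already in progress */
            atomic_inc(&clp->cl_count);     /* pin the client for the thread */
            task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
                            NIPQUAD(clp->cl_addr.sin_addr));
            if (IS_ERR(task)) {             /* spawn failed: undo bit and reference */
                    nfs4_clear_recover_bit(clp);
                    nfs_put_client(clp);
            }
    }
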
748 static void nfs4_state_mark_reclaim(struct nfs_client *clp)
755 list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
772 struct nfs_client *clp = ptr;
782 down_write(&clp->cl_sem);
784 if (list_empty(&clp->cl_superblocks))
789 cred = nfs4_get_renew_cred(clp);
792 status = nfs4_proc_renew(clp, cred);
804 clp->cl_boot_time = CURRENT_TIME;
805 cred = nfs4_get_setclientid_cred(clp);
808 nfs4_state_mark_reclaim(clp);
811 status = nfs4_init_client(clp, cred);
817 nfs_delegation_mark_reclaim(clp);
819 list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
832 nfs_delegation_reap_unclaimed(clp);
834 up_write(&clp->cl_sem);
837 nfs_handle_cb_pathdown(clp);
838 nfs4_clear_recover_bit(clp);
839 nfs_put_client(clp);
844 NIPQUAD(clp->cl_addr.sin_addr), -status);
845 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
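
The reclaimer() matches (lines 772-845) trace the recovery thread itself: take clp->cl_sem for writing, bail out if no superblocks remain, try a plain lease renewal, and only if that fails re-run SETCLIENTID, mark and reclaim per-owner state and delegations, then drop the semaphore, handle a callback-path-down condition, clear the recovery bit and release the client; on error the client address is logged and NFS4CLNT_LEASE_EXPIRED is set. A heavily condensed outline under those assumptions, with branch conditions, credential puts, and error labels simplified:

    /* Condensed, assumption-laden outline of reclaimer(); per-owner
     * reclaim calls and error handling are elided. */
    static int reclaimer_outline(void *ptr)
    {
            struct nfs_client *clp = ptr;
            struct nfs4_state_owner *sp;
            struct rpc_cred *cred;
            int status;

            down_write(&clp->cl_sem);               /* block new state operations */
            if (list_empty(&clp->cl_superblocks))
                    goto out;                       /* nothing mounted any more */

            cred = nfs4_get_renew_cred(clp);
            status = nfs4_proc_renew(clp, cred);    /* first try a simple renewal */
            if (status != 0) {
                    /* lease lost: start a new client instance and reclaim state */
                    clp->cl_boot_time = CURRENT_TIME;
                    cred = nfs4_get_setclientid_cred(clp);
                    nfs4_state_mark_reclaim(clp);
                    status = nfs4_init_client(clp, cred);
                    nfs_delegation_mark_reclaim(clp);
                    list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                            /* per-owner open/lock state reclaim runs here */
                    }
                    nfs_delegation_reap_unclaimed(clp);
            }
    out:
            up_write(&clp->cl_sem);
            nfs_handle_cb_pathdown(clp);
            nfs4_clear_recover_bit(clp);
            nfs_put_client(clp);
            return 0;
    }
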