1/*-
2 * Copyright (c) 2009 Rick Macklem, University of Guelph
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/fs/nfsserver/nfs_nfsdstate.c 224083 2011-07-16 08:05:41Z zack $");
30
31#ifndef APPLEKEXT
32#include <fs/nfs/nfsport.h>
33
34struct nfsrv_stablefirst nfsrv_stablefirst;
35int nfsrv_issuedelegs = 0;
36int nfsrv_dolocallocks = 0;
37struct nfsv4lock nfsv4rootfs_lock;
38
39extern int newnfs_numnfsd;
40extern struct nfsstats newnfsstats;
41extern int nfsrv_lease;
42extern struct timeval nfsboottime;
43extern u_int32_t newnfs_true, newnfs_false;
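/*
 * Storage for the mutexes used via the NFSLOCKV4ROOTMUTEX()/NFSLOCKSTATE()
 * macro pairs below; the declarations themselves are hidden behind macros
 * from nfsport.h.
 */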
44NFSV4ROOTLOCKMUTEX;
45NFSSTATESPINLOCK;
46
47/*
48 * Hash lists for nfs V4.
49 * (Some would put them in the .h file, but I don't like declaring storage
50 * in a .h)
51 */
52struct nfsclienthashhead nfsclienthash[NFSCLIENTHASHSIZE];
53struct nfslockhashhead nfslockhash[NFSLOCKHASHSIZE];
54#endif /* !APPLEKEXT */
55
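/*
 * Notes on some of the knobs below, summarized from their use in this file:
 * nfsrv_returnoldstateid - if set, NFSERR_OLDSTATEID is also returned for
 *	I/O ops that present a stale stateid seqid (see nfsrv_lockctrl()).
 * nfsrv_clienthighwater - once the client count exceeds this, clients with
 *	expired leases are reaped more aggressively (see nfsrv_servertimer()).
 * nfsrv_nogsscallback - if set, callbacks are not attempted for AUTH_GSS
 *	clients (see nfsrv_setclient()).
 */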
56static u_int32_t nfsrv_openpluslock = 0, nfsrv_delegatecnt = 0;
57static time_t nfsrvboottime;
58static int nfsrv_writedelegifpos = 1;
59static int nfsrv_returnoldstateid = 0, nfsrv_clients = 0;
60static int nfsrv_clienthighwater = NFSRV_CLIENTHIGHWATER;
61static int nfsrv_nogsscallback = 0;
62
63/* local functions */
64static void nfsrv_dumpaclient(struct nfsclient *clp,
65 struct nfsd_dumpclients *dumpp);
66static void nfsrv_freeopenowner(struct nfsstate *stp, int cansleep,
67 NFSPROC_T *p);
68static int nfsrv_freeopen(struct nfsstate *stp, vnode_t vp, int cansleep,
69 NFSPROC_T *p);
70static void nfsrv_freelockowner(struct nfsstate *stp, vnode_t vp, int cansleep,
71 NFSPROC_T *p);
72static void nfsrv_freeallnfslocks(struct nfsstate *stp, vnode_t vp,
73 int cansleep, NFSPROC_T *p);
74static void nfsrv_freenfslock(struct nfslock *lop);
75static void nfsrv_freenfslockfile(struct nfslockfile *lfp);
76static void nfsrv_freedeleg(struct nfsstate *);
77static int nfsrv_getstate(struct nfsclient *clp, nfsv4stateid_t *stateidp,
78 u_int32_t flags, struct nfsstate **stpp);
79static void nfsrv_getowner(struct nfsstatehead *hp, struct nfsstate *new_stp,
80 struct nfsstate **stpp);
81static int nfsrv_getlockfh(vnode_t vp, u_short flags,
82 struct nfslockfile **new_lfpp, fhandle_t *nfhp, NFSPROC_T *p);
83static int nfsrv_getlockfile(u_short flags, struct nfslockfile **new_lfpp,
84 struct nfslockfile **lfpp, fhandle_t *nfhp, int lockit);
85static void nfsrv_insertlock(struct nfslock *new_lop,
86 struct nfslock *insert_lop, struct nfsstate *stp, struct nfslockfile *lfp);
87static void nfsrv_updatelock(struct nfsstate *stp, struct nfslock **new_lopp,
88 struct nfslock **other_lopp, struct nfslockfile *lfp);
89static int nfsrv_getipnumber(u_char *cp);
90static int nfsrv_checkrestart(nfsquad_t clientid, u_int32_t flags,
91 nfsv4stateid_t *stateidp, int specialid);
92static int nfsrv_checkgrace(u_int32_t flags);
93static int nfsrv_docallback(struct nfsclient *clp, int procnum,
94 nfsv4stateid_t *stateidp, int trunc, fhandle_t *fhp,
95 struct nfsvattr *nap, nfsattrbit_t *attrbitp, NFSPROC_T *p);
96static u_int32_t nfsrv_nextclientindex(void);
97static u_int32_t nfsrv_nextstateindex(struct nfsclient *clp);
98static void nfsrv_markstable(struct nfsclient *clp);
99static int nfsrv_checkstable(struct nfsclient *clp);
100static int nfsrv_clientconflict(struct nfsclient *clp, int *haslockp, struct
101 vnode *vp, NFSPROC_T *p);
102static int nfsrv_delegconflict(struct nfsstate *stp, int *haslockp,
103 NFSPROC_T *p, vnode_t vp);
104static int nfsrv_cleandeleg(vnode_t vp, struct nfslockfile *lfp,
105 struct nfsclient *clp, int *haslockp, NFSPROC_T *p);
106static int nfsrv_notsamecredname(struct nfsrv_descript *nd,
107 struct nfsclient *clp);
108static time_t nfsrv_leaseexpiry(void);
109static void nfsrv_delaydelegtimeout(struct nfsstate *stp);
110static int nfsrv_checkseqid(struct nfsrv_descript *nd, u_int32_t seqid,
111 struct nfsstate *stp, struct nfsrvcache *op);
112static int nfsrv_nootherstate(struct nfsstate *stp);
113static int nfsrv_locallock(vnode_t vp, struct nfslockfile *lfp, int flags,
114 uint64_t first, uint64_t end, struct nfslockconflict *cfp, NFSPROC_T *p);
115static void nfsrv_localunlock(vnode_t vp, struct nfslockfile *lfp,
116 uint64_t init_first, uint64_t init_end, NFSPROC_T *p);
117static int nfsrv_dolocal(vnode_t vp, struct nfslockfile *lfp, int flags,
118 int oldflags, uint64_t first, uint64_t end, struct nfslockconflict *cfp,
119 NFSPROC_T *p);
120static void nfsrv_locallock_rollback(vnode_t vp, struct nfslockfile *lfp,
121 NFSPROC_T *p);
122static void nfsrv_locallock_commit(struct nfslockfile *lfp, int flags,
123 uint64_t first, uint64_t end);
124static void nfsrv_locklf(struct nfslockfile *lfp);
125static void nfsrv_unlocklf(struct nfslockfile *lfp);
126
127/*
128 * Scan the client list for a match and either return the current one,
129 * create a new entry or return an error.
130 * If returning a non-error, the clp structure must either be linked into
131 * the client list or free'd.
132 */
133APPLESTATIC int
134nfsrv_setclient(struct nfsrv_descript *nd, struct nfsclient **new_clpp,
135 nfsquad_t *clientidp, nfsquad_t *confirmp, NFSPROC_T *p)
136{
137 struct nfsclient *clp = NULL, *new_clp = *new_clpp;
138 int i;
139 struct nfsstate *stp, *tstp;
140 struct sockaddr_in *sad, *rad;
141 int zapit = 0, gotit, hasstate = 0, igotlock;
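 /*
 * Monotonically increasing value handed out as the setclientid
 * confirm verifier.
 */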
142 static u_int64_t confirm_index = 0;
143
144 /*
145 * Check for state resource limit exceeded.
146 */
147 if (nfsrv_openpluslock > NFSRV_V4STATELIMIT)
148 return (NFSERR_RESOURCE);
149
150 if (nfsrv_issuedelegs == 0 ||
151 ((nd->nd_flag & ND_GSS) != 0 && nfsrv_nogsscallback != 0))
152 /*
153 * Don't do callbacks when delegations are disabled or
154 * for AUTH_GSS when nfsrv_nogsscallback is set.
155 * If establishing a callback connection is attempted
156 * when a firewall is blocking the callback path, the
157 * server may wait too long for the connect attempt to
158 * succeed during the Open. Some clients, such as Linux,
159 * may timeout and give up on the Open before the server
160 * replies. Also, since AUTH_GSS callbacks are not
161 * yet interoperability tested, they might cause the
162 * server to crap out, if they get past the Init call to
163 * the client.
164 */
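 /* Setting lc_program to 0 is what leaves the callbacks disabled. */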
165 new_clp->lc_program = 0;
166
167 /* Lock out other nfsd threads */
168 NFSLOCKV4ROOTMUTEX();
169 nfsv4_relref(&nfsv4rootfs_lock);
170 do {
171 igotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL,
172 NFSV4ROOTLOCKMUTEXPTR, NULL);
173 } while (!igotlock);
174 NFSUNLOCKV4ROOTMUTEX();
175
176 /*
177 * Search for a match in the client list.
178 */
179 gotit = i = 0;
180 while (i < NFSCLIENTHASHSIZE && !gotit) {
181 LIST_FOREACH(clp, &nfsclienthash[i], lc_hash) {
182 if (new_clp->lc_idlen == clp->lc_idlen &&
183 !NFSBCMP(new_clp->lc_id, clp->lc_id, clp->lc_idlen)) {
184 gotit = 1;
185 break;
186 }
187 }
188 i++;
189 }
190 if (!gotit ||
191 (clp->lc_flags & (LCL_NEEDSCONFIRM | LCL_ADMINREVOKED))) {
192 /*
193 * Get rid of the old one.
194 */
195 if (i != NFSCLIENTHASHSIZE) {
196 LIST_REMOVE(clp, lc_hash);
197 nfsrv_cleanclient(clp, p);
198 nfsrv_freedeleglist(&clp->lc_deleg);
199 nfsrv_freedeleglist(&clp->lc_olddeleg);
200 zapit = 1;
201 }
202 /*
203 * Add it after assigning a client id to it.
204 */
205 new_clp->lc_flags |= LCL_NEEDSCONFIRM;
206 confirmp->qval = new_clp->lc_confirm.qval = ++confirm_index;
207 clientidp->lval[0] = new_clp->lc_clientid.lval[0] =
208 (u_int32_t)nfsrvboottime;
209 clientidp->lval[1] = new_clp->lc_clientid.lval[1] =
210 nfsrv_nextclientindex();
211 new_clp->lc_stateindex = 0;
212 new_clp->lc_statemaxindex = 0;
213 new_clp->lc_cbref = 0;
214 new_clp->lc_expiry = nfsrv_leaseexpiry();
215 LIST_INIT(&new_clp->lc_open);
216 LIST_INIT(&new_clp->lc_deleg);
217 LIST_INIT(&new_clp->lc_olddeleg);
218 for (i = 0; i < NFSSTATEHASHSIZE; i++)
219 LIST_INIT(&new_clp->lc_stateid[i]);
220 LIST_INSERT_HEAD(NFSCLIENTHASH(new_clp->lc_clientid), new_clp,
221 lc_hash);
222 newnfsstats.srvclients++;
223 nfsrv_openpluslock++;
224 nfsrv_clients++;
225 NFSLOCKV4ROOTMUTEX();
226 nfsv4_unlock(&nfsv4rootfs_lock, 1);
227 NFSUNLOCKV4ROOTMUTEX();
228 if (zapit)
229 nfsrv_zapclient(clp, p);
230 *new_clpp = NULL;
231 return (0);
232 }
233
234 /*
235 * Now, handle the cases where the id is already issued.
236 */
237 if (nfsrv_notsamecredname(nd, clp)) {
238 /*
239 * Check to see if there is expired state that should go away.
240 */
241 if (clp->lc_expiry < NFSD_MONOSEC &&
242 (!LIST_EMPTY(&clp->lc_open) || !LIST_EMPTY(&clp->lc_deleg))) {
243 nfsrv_cleanclient(clp, p);
244 nfsrv_freedeleglist(&clp->lc_deleg);
245 }
246
247 /*
248 * If there is outstanding state, then reply NFSERR_CLIDINUSE per
249 * RFC3530 Sec. 8.1.2 last para.
250 */
251 if (!LIST_EMPTY(&clp->lc_deleg)) {
252 hasstate = 1;
253 } else if (LIST_EMPTY(&clp->lc_open)) {
254 hasstate = 0;
255 } else {
256 hasstate = 0;
257 /* Look for an Open on the OpenOwner */
258 LIST_FOREACH(stp, &clp->lc_open, ls_list) {
259 if (!LIST_EMPTY(&stp->ls_open)) {
260 hasstate = 1;
261 break;
262 }
263 }
264 }
265 if (hasstate) {
266 /*
267 * If the uid doesn't match, return NFSERR_CLIDINUSE after
268 * filling out the correct ipaddr and portnum.
269 */
270 sad = NFSSOCKADDR(new_clp->lc_req.nr_nam, struct sockaddr_in *);
271 rad = NFSSOCKADDR(clp->lc_req.nr_nam, struct sockaddr_in *);
272 sad->sin_addr.s_addr = rad->sin_addr.s_addr;
273 sad->sin_port = rad->sin_port;
274 NFSLOCKV4ROOTMUTEX();
275 nfsv4_unlock(&nfsv4rootfs_lock, 1);
276 NFSUNLOCKV4ROOTMUTEX();
277 return (NFSERR_CLIDINUSE);
278 }
279 }
280
281 if (NFSBCMP(new_clp->lc_verf, clp->lc_verf, NFSX_VERF)) {
282 /*
283 * If the verifier has changed, the client has rebooted
284 * and a new client id is issued. The old state info
285 * can be thrown away once the SETCLIENTID_CONFIRM occurs.
286 */
287 LIST_REMOVE(clp, lc_hash);
288 new_clp->lc_flags |= LCL_NEEDSCONFIRM;
289 confirmp->qval = new_clp->lc_confirm.qval = ++confirm_index;
290 clientidp->lval[0] = new_clp->lc_clientid.lval[0] =
291 nfsrvboottime;
292 clientidp->lval[1] = new_clp->lc_clientid.lval[1] =
293 nfsrv_nextclientindex();
294 new_clp->lc_stateindex = 0;
295 new_clp->lc_statemaxindex = 0;
296 new_clp->lc_cbref = 0;
297 new_clp->lc_expiry = nfsrv_leaseexpiry();
298
299 /*
300 * Save the state until confirmed.
301 */
302 LIST_NEWHEAD(&new_clp->lc_open, &clp->lc_open, ls_list);
303 LIST_FOREACH(tstp, &new_clp->lc_open, ls_list)
304 tstp->ls_clp = new_clp;
305 LIST_NEWHEAD(&new_clp->lc_deleg, &clp->lc_deleg, ls_list);
306 LIST_FOREACH(tstp, &new_clp->lc_deleg, ls_list)
307 tstp->ls_clp = new_clp;
308 LIST_NEWHEAD(&new_clp->lc_olddeleg, &clp->lc_olddeleg,
309 ls_list);
310 LIST_FOREACH(tstp, &new_clp->lc_olddeleg, ls_list)
311 tstp->ls_clp = new_clp;
312 for (i = 0; i < NFSSTATEHASHSIZE; i++) {
313 LIST_NEWHEAD(&new_clp->lc_stateid[i],
314 &clp->lc_stateid[i], ls_hash);
315 LIST_FOREACH(tstp, &new_clp->lc_stateid[i], ls_list)
316 tstp->ls_clp = new_clp;
317 }
318 LIST_INSERT_HEAD(NFSCLIENTHASH(new_clp->lc_clientid), new_clp,
319 lc_hash);
320 newnfsstats.srvclients++;
321 nfsrv_openpluslock++;
322 nfsrv_clients++;
323 NFSLOCKV4ROOTMUTEX();
324 nfsv4_unlock(&nfsv4rootfs_lock, 1);
325 NFSUNLOCKV4ROOTMUTEX();
326
327 /*
328 * Must wait until any outstanding callback on the old clp
329 * completes.
330 */
331 while (clp->lc_cbref) {
332 clp->lc_flags |= LCL_WAKEUPWANTED;
333 (void) tsleep((caddr_t)clp, PZERO - 1,
334 "nfsd clp", 10 * hz);
335 }
336 nfsrv_zapclient(clp, p);
337 *new_clpp = NULL;
338 return (0);
339 }
340 /*
341 * id and verifier match, so update the net address info
342 * and get rid of any existing callback authentication
343 * handle, so a new one will be acquired.
344 */
345 LIST_REMOVE(clp, lc_hash);
346 new_clp->lc_flags |= (LCL_NEEDSCONFIRM | LCL_DONTCLEAN);
347 new_clp->lc_expiry = nfsrv_leaseexpiry();
348 confirmp->qval = new_clp->lc_confirm.qval = ++confirm_index;
349 clientidp->lval[0] = new_clp->lc_clientid.lval[0] =
350 clp->lc_clientid.lval[0];
351 clientidp->lval[1] = new_clp->lc_clientid.lval[1] =
352 clp->lc_clientid.lval[1];
353 new_clp->lc_delegtime = clp->lc_delegtime;
354 new_clp->lc_stateindex = clp->lc_stateindex;
355 new_clp->lc_statemaxindex = clp->lc_statemaxindex;
356 new_clp->lc_cbref = 0;
357 LIST_NEWHEAD(&new_clp->lc_open, &clp->lc_open, ls_list);
358 LIST_FOREACH(tstp, &new_clp->lc_open, ls_list)
359 tstp->ls_clp = new_clp;
360 LIST_NEWHEAD(&new_clp->lc_deleg, &clp->lc_deleg, ls_list);
361 LIST_FOREACH(tstp, &new_clp->lc_deleg, ls_list)
362 tstp->ls_clp = new_clp;
363 LIST_NEWHEAD(&new_clp->lc_olddeleg, &clp->lc_olddeleg, ls_list);
364 LIST_FOREACH(tstp, &new_clp->lc_olddeleg, ls_list)
365 tstp->ls_clp = new_clp;
366 for (i = 0; i < NFSSTATEHASHSIZE; i++) {
367 LIST_NEWHEAD(&new_clp->lc_stateid[i], &clp->lc_stateid[i],
368 ls_hash);
369 LIST_FOREACH(tstp, &new_clp->lc_stateid[i], ls_list)
370 tstp->ls_clp = new_clp;
371 }
372 LIST_INSERT_HEAD(NFSCLIENTHASH(new_clp->lc_clientid), new_clp,
373 lc_hash);
374 newnfsstats.srvclients++;
375 nfsrv_openpluslock++;
376 nfsrv_clients++;
377 NFSLOCKV4ROOTMUTEX();
378 nfsv4_unlock(&nfsv4rootfs_lock, 1);
379 NFSUNLOCKV4ROOTMUTEX();
380
381 /*
382 * Must wait until any outstanding callback on the old clp
383 * completes.
384 */
385 while (clp->lc_cbref) {
386 clp->lc_flags |= LCL_WAKEUPWANTED;
387 (void) tsleep((caddr_t)clp, PZERO - 1, "nfsd clp", 10 * hz);
388 }
389 nfsrv_zapclient(clp, p);
390 *new_clpp = NULL;
391 return (0);
392}
393
394/*
395 * Check to see if the client id exists and optionally confirm it.
396 */
397APPLESTATIC int
398nfsrv_getclient(nfsquad_t clientid, int opflags, struct nfsclient **clpp,
399 nfsquad_t confirm, struct nfsrv_descript *nd, NFSPROC_T *p)
400{
401 struct nfsclient *clp;
402 struct nfsstate *stp;
403 int i;
404 struct nfsclienthashhead *hp;
405 int error = 0, igotlock, doneok;
406
407 if (clpp)
408 *clpp = NULL;
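 /*
 * The first word of a clientid is the server boot time (set in
 * nfsrv_setclient()), so a mismatch means the clientid was issued by a
 * previous server incarnation.
 */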
409 if (nfsrvboottime != clientid.lval[0])
410 return (NFSERR_STALECLIENTID);
411
412 /*
413 * If called with opflags == CLOPS_RENEW, the State Lock is
414 * already held. Otherwise, we need to get either that or,
415 * for the case of Confirm, lock out the nfsd threads.
416 */
417 if (opflags & CLOPS_CONFIRM) {
418 NFSLOCKV4ROOTMUTEX();
419 nfsv4_relref(&nfsv4rootfs_lock);
420 do {
421 igotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL,
422 NFSV4ROOTLOCKMUTEXPTR, NULL);
423 } while (!igotlock);
424 NFSUNLOCKV4ROOTMUTEX();
425 } else if (opflags != CLOPS_RENEW) {
426 NFSLOCKSTATE();
427 }
428
429 hp = NFSCLIENTHASH(clientid);
430 LIST_FOREACH(clp, hp, lc_hash) {
431 if (clp->lc_clientid.lval[1] == clientid.lval[1])
432 break;
433 }
434 if (clp == LIST_END(hp)) {
435 if (opflags & CLOPS_CONFIRM)
436 error = NFSERR_STALECLIENTID;
437 else
438 error = NFSERR_EXPIRED;
439 } else if (clp->lc_flags & LCL_ADMINREVOKED) {
440 /*
441 * If marked admin revoked, just return the error.
442 */
443 error = NFSERR_ADMINREVOKED;
444 }
445 if (error) {
446 if (opflags & CLOPS_CONFIRM) {
447 NFSLOCKV4ROOTMUTEX();
448 nfsv4_unlock(&nfsv4rootfs_lock, 1);
449 NFSUNLOCKV4ROOTMUTEX();
450 } else if (opflags != CLOPS_RENEW) {
451 NFSUNLOCKSTATE();
452 }
453 return (error);
454 }
455
456 /*
457 * Perform any operations specified by the opflags.
458 */
459 if (opflags & CLOPS_CONFIRM) {
460 if (clp->lc_confirm.qval != confirm.qval)
461 error = NFSERR_STALECLIENTID;
462 else if (nfsrv_notsamecredname(nd, clp))
463 error = NFSERR_CLIDINUSE;
464
465 if (!error) {
466 if ((clp->lc_flags & (LCL_NEEDSCONFIRM | LCL_DONTCLEAN)) ==
467 LCL_NEEDSCONFIRM) {
468 /*
469 * Hang onto the delegations (as old delegations)
470 * for an Open with CLAIM_DELEGATE_PREV unless in
471 * grace, but get rid of the rest of the state.
472 */
473 nfsrv_cleanclient(clp, p);
474 nfsrv_freedeleglist(&clp->lc_olddeleg);
475 if (nfsrv_checkgrace(0)) {
476 /* In grace, so just delete delegations */
477 nfsrv_freedeleglist(&clp->lc_deleg);
478 } else {
479 LIST_FOREACH(stp, &clp->lc_deleg, ls_list)
480 stp->ls_flags |= NFSLCK_OLDDELEG;
481 clp->lc_delegtime = NFSD_MONOSEC +
482 nfsrv_lease + NFSRV_LEASEDELTA;
483 LIST_NEWHEAD(&clp->lc_olddeleg, &clp->lc_deleg,
484 ls_list);
485 }
486 }
487 clp->lc_flags &= ~(LCL_NEEDSCONFIRM | LCL_DONTCLEAN);
488 if (clp->lc_program)
489 clp->lc_flags |= LCL_NEEDSCBNULL;
490 }
491 } else if (clp->lc_flags & LCL_NEEDSCONFIRM) {
492 error = NFSERR_EXPIRED;
493 }
494
495 /*
496 * If called by the Renew Op, we must check the principal.
497 */
498 if (!error && (opflags & CLOPS_RENEWOP)) {
499 if (nfsrv_notsamecredname(nd, clp)) {
500 doneok = 0;
501 for (i = 0; i < NFSSTATEHASHSIZE && doneok == 0; i++) {
502 LIST_FOREACH(stp, &clp->lc_stateid[i], ls_hash) {
503 if ((stp->ls_flags & NFSLCK_OPEN) &&
504 stp->ls_uid == nd->nd_cred->cr_uid) {
505 doneok = 1;
506 break;
507 }
508 }
509 }
510 if (!doneok)
511 error = NFSERR_ACCES;
512 }
513 if (!error && (clp->lc_flags & LCL_CBDOWN))
514 error = NFSERR_CBPATHDOWN;
515 }
516 if ((!error || error == NFSERR_CBPATHDOWN) &&
517 (opflags & CLOPS_RENEW)) {
518 clp->lc_expiry = nfsrv_leaseexpiry();
519 }
520 if (opflags & CLOPS_CONFIRM) {
521 NFSLOCKV4ROOTMUTEX();
522 nfsv4_unlock(&nfsv4rootfs_lock, 1);
523 NFSUNLOCKV4ROOTMUTEX();
524 } else if (opflags != CLOPS_RENEW) {
525 NFSUNLOCKSTATE();
526 }
527 if (clpp)
528 *clpp = clp;
529 return (error);
530}
531
532/*
533 * Called from the new nfssvc syscall to admin revoke a clientid.
534 * Returns 0 for success, error otherwise.
535 */
536APPLESTATIC int
537nfsrv_adminrevoke(struct nfsd_clid *revokep, NFSPROC_T *p)
538{
539 struct nfsclient *clp = NULL;
540 int i;
541 int gotit, igotlock;
542
543 /*
544 * First, lock out the nfsd so that state won't change while the
545 * revocation record is being written to the stable storage restart
546 * file.
547 */
548 NFSLOCKV4ROOTMUTEX();
549 do {
550 igotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL,
551 NFSV4ROOTLOCKMUTEXPTR, NULL);
552 } while (!igotlock);
553 NFSUNLOCKV4ROOTMUTEX();
554
555 /*
556 * Search for a match in the client list.
557 */
558 gotit = i = 0;
559 while (i < NFSCLIENTHASHSIZE && !gotit) {
560 LIST_FOREACH(clp, &nfsclienthash[i], lc_hash) {
561 if (revokep->nclid_idlen == clp->lc_idlen &&
562 !NFSBCMP(revokep->nclid_id, clp->lc_id, clp->lc_idlen)) {
563 gotit = 1;
564 break;
565 }
566 }
567 i++;
568 }
569 if (!gotit) {
570 NFSLOCKV4ROOTMUTEX();
571 nfsv4_unlock(&nfsv4rootfs_lock, 0);
572 NFSUNLOCKV4ROOTMUTEX();
573 return (EPERM);
574 }
575
576 /*
577 * Now, write out the revocation record
578 */
579 nfsrv_writestable(clp->lc_id, clp->lc_idlen, NFSNST_REVOKE, p);
580 nfsrv_backupstable();
581
582 /*
583 * and clear out the state, marking the clientid revoked.
584 */
585 clp->lc_flags &= ~LCL_CALLBACKSON;
586 clp->lc_flags |= LCL_ADMINREVOKED;
587 nfsrv_cleanclient(clp, p);
588 nfsrv_freedeleglist(&clp->lc_deleg);
589 nfsrv_freedeleglist(&clp->lc_olddeleg);
590 NFSLOCKV4ROOTMUTEX();
591 nfsv4_unlock(&nfsv4rootfs_lock, 0);
592 NFSUNLOCKV4ROOTMUTEX();
593 return (0);
594}
595
596/*
597 * Dump out stats for all clients. Called from nfssvc(2), which is used by
598 * newnfsstats.
599 */
600APPLESTATIC void
601nfsrv_dumpclients(struct nfsd_dumpclients *dumpp, int maxcnt)
602{
603 struct nfsclient *clp;
604 int i = 0, cnt = 0;
605
606 /*
607 * First, get a reference on the nfsv4rootfs_lock so that an
608 * exclusive lock cannot be acquired while dumping the clients.
609 */
610 NFSLOCKV4ROOTMUTEX();
611 nfsv4_getref(&nfsv4rootfs_lock, NULL, NFSV4ROOTLOCKMUTEXPTR, NULL);
612 NFSUNLOCKV4ROOTMUTEX();
613 NFSLOCKSTATE();
614 /*
615 * Rattle through the client lists until done.
616 */
617 while (i < NFSCLIENTHASHSIZE && cnt < maxcnt) {
618 clp = LIST_FIRST(&nfsclienthash[i]);
619 while (clp != LIST_END(&nfsclienthash[i]) && cnt < maxcnt) {
620 nfsrv_dumpaclient(clp, &dumpp[cnt]);
621 cnt++;
622 clp = LIST_NEXT(clp, lc_hash);
623 }
624 i++;
625 }
626 if (cnt < maxcnt)
627 dumpp[cnt].ndcl_clid.nclid_idlen = 0;
628 NFSUNLOCKSTATE();
629 NFSLOCKV4ROOTMUTEX();
630 nfsv4_relref(&nfsv4rootfs_lock);
631 NFSUNLOCKV4ROOTMUTEX();
632}
633
634/*
635 * Dump stats for a client. Must be called with the NFSSTATELOCK and spl'd.
636 */
637static void
638nfsrv_dumpaclient(struct nfsclient *clp, struct nfsd_dumpclients *dumpp)
639{
640 struct nfsstate *stp, *openstp, *lckownstp;
641 struct nfslock *lop;
642 struct sockaddr *sad;
643 struct sockaddr_in *rad;
644 struct sockaddr_in6 *rad6;
645
646 dumpp->ndcl_nopenowners = dumpp->ndcl_nlockowners = 0;
647 dumpp->ndcl_nopens = dumpp->ndcl_nlocks = 0;
648 dumpp->ndcl_ndelegs = dumpp->ndcl_nolddelegs = 0;
649 dumpp->ndcl_flags = clp->lc_flags;
650 dumpp->ndcl_clid.nclid_idlen = clp->lc_idlen;
651 NFSBCOPY(clp->lc_id, dumpp->ndcl_clid.nclid_id, clp->lc_idlen);
652 sad = NFSSOCKADDR(clp->lc_req.nr_nam, struct sockaddr *);
653 dumpp->ndcl_addrfam = sad->sa_family;
654 if (sad->sa_family == AF_INET) {
655 rad = (struct sockaddr_in *)sad;
656 dumpp->ndcl_cbaddr.sin_addr = rad->sin_addr;
657 } else {
658 rad6 = (struct sockaddr_in6 *)sad;
659 dumpp->ndcl_cbaddr.sin6_addr = rad6->sin6_addr;
660 }
661
662 /*
663 * Now, scan the state lists and total up the opens and locks.
664 */
665 LIST_FOREACH(stp, &clp->lc_open, ls_list) {
666 dumpp->ndcl_nopenowners++;
667 LIST_FOREACH(openstp, &stp->ls_open, ls_list) {
668 dumpp->ndcl_nopens++;
669 LIST_FOREACH(lckownstp, &openstp->ls_open, ls_list) {
670 dumpp->ndcl_nlockowners++;
671 LIST_FOREACH(lop, &lckownstp->ls_lock, lo_lckowner) {
672 dumpp->ndcl_nlocks++;
673 }
674 }
675 }
676 }
677
678 /*
679 * and the delegation lists.
680 */
681 LIST_FOREACH(stp, &clp->lc_deleg, ls_list) {
682 dumpp->ndcl_ndelegs++;
683 }
684 LIST_FOREACH(stp, &clp->lc_olddeleg, ls_list) {
685 dumpp->ndcl_nolddelegs++;
686 }
687}
688
689/*
690 * Dump out lock stats for a file.
691 */
692APPLESTATIC void
693nfsrv_dumplocks(vnode_t vp, struct nfsd_dumplocks *ldumpp, int maxcnt,
694 NFSPROC_T *p)
695{
696 struct nfsstate *stp;
697 struct nfslock *lop;
698 int cnt = 0;
699 struct nfslockfile *lfp;
700 struct sockaddr *sad;
701 struct sockaddr_in *rad;
702 struct sockaddr_in6 *rad6;
703 int ret;
704 fhandle_t nfh;
705
706 ret = nfsrv_getlockfh(vp, 0, NULL, &nfh, p);
707 /*
708 * First, get a reference on the nfsv4rootfs_lock so that an
709 * exclusive lock on it cannot be acquired while dumping the locks.
710 */
711 NFSLOCKV4ROOTMUTEX();
712 nfsv4_getref(&nfsv4rootfs_lock, NULL, NFSV4ROOTLOCKMUTEXPTR, NULL);
713 NFSUNLOCKV4ROOTMUTEX();
714 NFSLOCKSTATE();
715 if (!ret)
716 ret = nfsrv_getlockfile(0, NULL, &lfp, &nfh, 0);
717 if (ret) {
718 ldumpp[0].ndlck_clid.nclid_idlen = 0;
719 NFSUNLOCKSTATE();
720 NFSLOCKV4ROOTMUTEX();
721 nfsv4_relref(&nfsv4rootfs_lock);
722 NFSUNLOCKV4ROOTMUTEX();
723 return;
724 }
725
726 /*
727 * For each open share on file, dump it out.
728 */
729 stp = LIST_FIRST(&lfp->lf_open);
730 while (stp != LIST_END(&lfp->lf_open) && cnt < maxcnt) {
731 ldumpp[cnt].ndlck_flags = stp->ls_flags;
732 ldumpp[cnt].ndlck_stateid.seqid = stp->ls_stateid.seqid;
733 ldumpp[cnt].ndlck_stateid.other[0] = stp->ls_stateid.other[0];
734 ldumpp[cnt].ndlck_stateid.other[1] = stp->ls_stateid.other[1];
735 ldumpp[cnt].ndlck_stateid.other[2] = stp->ls_stateid.other[2];
736 ldumpp[cnt].ndlck_owner.nclid_idlen =
737 stp->ls_openowner->ls_ownerlen;
738 NFSBCOPY(stp->ls_openowner->ls_owner,
739 ldumpp[cnt].ndlck_owner.nclid_id,
740 stp->ls_openowner->ls_ownerlen);
741 ldumpp[cnt].ndlck_clid.nclid_idlen = stp->ls_clp->lc_idlen;
742 NFSBCOPY(stp->ls_clp->lc_id, ldumpp[cnt].ndlck_clid.nclid_id,
743 stp->ls_clp->lc_idlen);
744 sad=NFSSOCKADDR(stp->ls_clp->lc_req.nr_nam, struct sockaddr *);
745 ldumpp[cnt].ndlck_addrfam = sad->sa_family;
746 if (sad->sa_family == AF_INET) {
747 rad = (struct sockaddr_in *)sad;
748 ldumpp[cnt].ndlck_cbaddr.sin_addr = rad->sin_addr;
749 } else {
750 rad6 = (struct sockaddr_in6 *)sad;
751 ldumpp[cnt].ndlck_cbaddr.sin6_addr = rad6->sin6_addr;
752 }
753 stp = LIST_NEXT(stp, ls_file);
754 cnt++;
755 }
756
757 /*
758 * and all locks.
759 */
760 lop = LIST_FIRST(&lfp->lf_lock);
761 while (lop != LIST_END(&lfp->lf_lock) && cnt < maxcnt) {
762 stp = lop->lo_stp;
763 ldumpp[cnt].ndlck_flags = lop->lo_flags;
764 ldumpp[cnt].ndlck_first = lop->lo_first;
765 ldumpp[cnt].ndlck_end = lop->lo_end;
766 ldumpp[cnt].ndlck_stateid.seqid = stp->ls_stateid.seqid;
767 ldumpp[cnt].ndlck_stateid.other[0] = stp->ls_stateid.other[0];
768 ldumpp[cnt].ndlck_stateid.other[1] = stp->ls_stateid.other[1];
769 ldumpp[cnt].ndlck_stateid.other[2] = stp->ls_stateid.other[2];
770 ldumpp[cnt].ndlck_owner.nclid_idlen = stp->ls_ownerlen;
771 NFSBCOPY(stp->ls_owner, ldumpp[cnt].ndlck_owner.nclid_id,
772 stp->ls_ownerlen);
773 ldumpp[cnt].ndlck_clid.nclid_idlen = stp->ls_clp->lc_idlen;
774 NFSBCOPY(stp->ls_clp->lc_id, ldumpp[cnt].ndlck_clid.nclid_id,
775 stp->ls_clp->lc_idlen);
776 sad=NFSSOCKADDR(stp->ls_clp->lc_req.nr_nam, struct sockaddr *);
777 ldumpp[cnt].ndlck_addrfam = sad->sa_family;
778 if (sad->sa_family == AF_INET) {
779 rad = (struct sockaddr_in *)sad;
780 ldumpp[cnt].ndlck_cbaddr.sin_addr = rad->sin_addr;
781 } else {
782 rad6 = (struct sockaddr_in6 *)sad;
783 ldumpp[cnt].ndlck_cbaddr.sin6_addr = rad6->sin6_addr;
784 }
785 lop = LIST_NEXT(lop, lo_lckfile);
786 cnt++;
787 }
788
789 /*
790 * and the delegations.
791 */
792 stp = LIST_FIRST(&lfp->lf_deleg);
793 while (stp != LIST_END(&lfp->lf_deleg) && cnt < maxcnt) {
794 ldumpp[cnt].ndlck_flags = stp->ls_flags;
795 ldumpp[cnt].ndlck_stateid.seqid = stp->ls_stateid.seqid;
796 ldumpp[cnt].ndlck_stateid.other[0] = stp->ls_stateid.other[0];
797 ldumpp[cnt].ndlck_stateid.other[1] = stp->ls_stateid.other[1];
798 ldumpp[cnt].ndlck_stateid.other[2] = stp->ls_stateid.other[2];
799 ldumpp[cnt].ndlck_owner.nclid_idlen = 0;
800 ldumpp[cnt].ndlck_clid.nclid_idlen = stp->ls_clp->lc_idlen;
801 NFSBCOPY(stp->ls_clp->lc_id, ldumpp[cnt].ndlck_clid.nclid_id,
802 stp->ls_clp->lc_idlen);
803 sad=NFSSOCKADDR(stp->ls_clp->lc_req.nr_nam, struct sockaddr *);
804 ldumpp[cnt].ndlck_addrfam = sad->sa_family;
805 if (sad->sa_family == AF_INET) {
806 rad = (struct sockaddr_in *)sad;
807 ldumpp[cnt].ndlck_cbaddr.sin_addr = rad->sin_addr;
808 } else {
809 rad6 = (struct sockaddr_in6 *)sad;
810 ldumpp[cnt].ndlck_cbaddr.sin6_addr = rad6->sin6_addr;
811 }
812 stp = LIST_NEXT(stp, ls_file);
813 cnt++;
814 }
815
816 /*
817 * If list isn't full, mark end of list by setting the client name
818 * to zero length.
819 */
820 if (cnt < maxcnt)
821 ldumpp[cnt].ndlck_clid.nclid_idlen = 0;
822 NFSUNLOCKSTATE();
823 NFSLOCKV4ROOTMUTEX();
824 nfsv4_relref(&nfsv4rootfs_lock);
825 NFSUNLOCKV4ROOTMUTEX();
826}
827
828/*
829 * Server timer routine. It can scan any linked list, so long
830 * as it holds the spin/mutex lock and there is no exclusive lock on
831 * nfsv4rootfs_lock.
832 * (For OpenBSD, a kthread is ok. For FreeBSD, I think it is ok
833 * to do this from a callout, since the spin locks work. For
834 * Darwin, I'm not sure what will work correctly yet.)
835 * Should be called once per second.
836 */
837APPLESTATIC void
838nfsrv_servertimer(void)
839{
840 struct nfsclient *clp, *nclp;
841 struct nfsstate *stp, *nstp;
842 int got_ref, i;
843
844 /*
845 * Make sure nfsboottime is set. This is used by V3 as well
846 * as V4. Note that nfsboottime is not nfsrvboottime, which is
847 * only used by the V4 server for leases.
848 */
849 if (nfsboottime.tv_sec == 0)
850 NFSSETBOOTTIME(nfsboottime);
851
852 /*
853 * If server hasn't started yet, just return.
854 */
855 NFSLOCKSTATE();
856 if (nfsrv_stablefirst.nsf_eograce == 0) {
857 NFSUNLOCKSTATE();
858 return;
859 }
860 if (!(nfsrv_stablefirst.nsf_flags & NFSNSF_UPDATEDONE)) {
861 if (!(nfsrv_stablefirst.nsf_flags & NFSNSF_GRACEOVER) &&
862 NFSD_MONOSEC > nfsrv_stablefirst.nsf_eograce)
863 nfsrv_stablefirst.nsf_flags |=
864 (NFSNSF_GRACEOVER | NFSNSF_NEEDLOCK);
865 NFSUNLOCKSTATE();
866 return;
867 }
868
869 /*
870 * Try and get a reference count on the nfsv4rootfs_lock so that
871 * no nfsd thread can acquire an exclusive lock on it before this
872 * call is done. If it is already exclusively locked, just return.
873 */
874 NFSLOCKV4ROOTMUTEX();
875 got_ref = nfsv4_getref_nonblock(&nfsv4rootfs_lock);
876 NFSUNLOCKV4ROOTMUTEX();
877 if (got_ref == 0) {
878 NFSUNLOCKSTATE();
879 return;
880 }
881
882 /*
883 * For each client...
884 */
885 for (i = 0; i < NFSCLIENTHASHSIZE; i++) {
886 clp = LIST_FIRST(&nfsclienthash[i]);
887 while (clp != LIST_END(&nfsclienthash[i])) {
888 nclp = LIST_NEXT(clp, lc_hash);
889 if (!(clp->lc_flags & LCL_EXPIREIT)) {
890 if (((clp->lc_expiry + NFSRV_STALELEASE) < NFSD_MONOSEC
891 && ((LIST_EMPTY(&clp->lc_deleg)
892 && LIST_EMPTY(&clp->lc_open)) ||
893 nfsrv_clients > nfsrv_clienthighwater)) ||
894 (clp->lc_expiry + NFSRV_MOULDYLEASE) < NFSD_MONOSEC ||
895 (clp->lc_expiry < NFSD_MONOSEC &&
896 (nfsrv_openpluslock * 10 / 9) > NFSRV_V4STATELIMIT)) {
897 /*
898 * Lease has expired several nfsrv_lease times ago:
899 * PLUS
900 * - no state is associated with it
901 * OR
902 * - above high water mark for number of clients
903 * (nfsrv_clienthighwater should be large enough
904 * that this only occurs when clients fail to
905 * use the same nfs_client_id4.id. Maybe somewhat
906 * higher than the maximum number of clients that
907 * will mount this server?)
908 * OR
909 * Lease has expired a very long time ago
910 * OR
911 * Lease has expired PLUS the number of opens + locks
912 * has exceeded 90% of capacity
913 *
914 * --> Mark for expiry. The actual expiry will be done
915 * by an nfsd sometime soon.
916 */
917 clp->lc_flags |= LCL_EXPIREIT;
918 nfsrv_stablefirst.nsf_flags |=
919 (NFSNSF_NEEDLOCK | NFSNSF_EXPIREDCLIENT);
920 } else {
921 /*
922 * If there are no opens, increment the no-open tick count.
923 * If the count exceeds NFSNOOPEN, mark the openowner to be thrown away;
924 * otherwise, if there is an open, reset the no-open count.
925 * Hopefully, this will avoid excessive re-creation
926 * of open owners and subsequent open confirms.
927 */
928 stp = LIST_FIRST(&clp->lc_open);
929 while (stp != LIST_END(&clp->lc_open)) {
930 nstp = LIST_NEXT(stp, ls_list);
931 if (LIST_EMPTY(&stp->ls_open)) {
932 stp->ls_noopens++;
933 if (stp->ls_noopens > NFSNOOPEN ||
934 (nfsrv_openpluslock * 2) >
935 NFSRV_V4STATELIMIT)
936 nfsrv_stablefirst.nsf_flags |=
937 NFSNSF_NOOPENS;
938 } else {
939 stp->ls_noopens = 0;
940 }
941 stp = nstp;
942 }
943 }
944 }
945 clp = nclp;
946 }
947 }
948 NFSUNLOCKSTATE();
949 NFSLOCKV4ROOTMUTEX();
950 nfsv4_relref(&nfsv4rootfs_lock);
951 NFSUNLOCKV4ROOTMUTEX();
952}
953
954/*
955 * The following set of functions free up the various data structures.
956 */
957/*
958 * Clear out all open/lock state related to this nfsclient.
959 * Caller must hold an exclusive lock on nfsv4rootfs_lock, so that
960 * there are no other active nfsd threads.
961 */
962APPLESTATIC void
963nfsrv_cleanclient(struct nfsclient *clp, NFSPROC_T *p)
964{
965 struct nfsstate *stp, *nstp;
966
967 LIST_FOREACH_SAFE(stp, &clp->lc_open, ls_list, nstp)
968 nfsrv_freeopenowner(stp, 1, p);
969}
970
971/*
972 * Free a client that has been cleaned. It should also already have been
973 * removed from the lists.
974 * (Just to be safe w.r.t. newnfs_disconnect(), call this function when
975 * softclock interrupts are enabled.)
976 */
977APPLESTATIC void
978nfsrv_zapclient(struct nfsclient *clp, NFSPROC_T *p)
979{
980
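 /*
 * The "notyet" code below would tear down the client's GSS callback
 * handle by marking it destroyed and doing a CB_NULL callback, but it
 * is not enabled yet.
 */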
981#ifdef notyet
982 if ((clp->lc_flags & (LCL_GSS | LCL_CALLBACKSON)) ==
983 (LCL_GSS | LCL_CALLBACKSON) &&
984 (clp->lc_hand.nfsh_flag & NFSG_COMPLETE) &&
985 clp->lc_handlelen > 0) {
986 clp->lc_hand.nfsh_flag &= ~NFSG_COMPLETE;
987 clp->lc_hand.nfsh_flag |= NFSG_DESTROYED;
988 (void) nfsrv_docallback(clp, NFSV4PROC_CBNULL,
989 NULL, 0, NULL, NULL, NULL, p);
990 }
991#endif
992 newnfs_disconnect(&clp->lc_req);
993 NFSSOCKADDRFREE(clp->lc_req.nr_nam);
994 NFSFREEMUTEX(&clp->lc_req.nr_mtx);
995 free((caddr_t)clp, M_NFSDCLIENT);
996 NFSLOCKSTATE();
997 newnfsstats.srvclients--;
998 nfsrv_openpluslock--;
999 nfsrv_clients--;
1000 NFSUNLOCKSTATE();
1001}
1002
1003/*
1004 * Free a list of delegation state structures.
1005 * (This function will also free all nfslockfile structures that no
1006 * longer have associated state.)
1007 */
1008APPLESTATIC void
1009nfsrv_freedeleglist(struct nfsstatehead *sthp)
1010{
1011 struct nfsstate *stp, *nstp;
1012
1013 LIST_FOREACH_SAFE(stp, sthp, ls_list, nstp) {
1014 nfsrv_freedeleg(stp);
1015 }
1016 LIST_INIT(sthp);
1017}
1018
1019/*
1020 * Free up a delegation.
1021 */
1022static void
1023nfsrv_freedeleg(struct nfsstate *stp)
1024{
1025 struct nfslockfile *lfp;
1026
1027 LIST_REMOVE(stp, ls_hash);
1028 LIST_REMOVE(stp, ls_list);
1029 LIST_REMOVE(stp, ls_file);
1030 lfp = stp->ls_lfp;
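 /*
 * Free the nfslockfile as well, iff no opens, locks, delegations or
 * local lock state still reference it.
 */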
1031 if (LIST_EMPTY(&lfp->lf_open) &&
1032 LIST_EMPTY(&lfp->lf_lock) && LIST_EMPTY(&lfp->lf_deleg) &&
1033 LIST_EMPTY(&lfp->lf_locallock) && LIST_EMPTY(&lfp->lf_rollback) &&
1034 lfp->lf_usecount == 0 &&
1035 nfsv4_testlock(&lfp->lf_locallock_lck) == 0)
1036 nfsrv_freenfslockfile(lfp);
1037 FREE((caddr_t)stp, M_NFSDSTATE);
1038 newnfsstats.srvdelegates--;
1039 nfsrv_openpluslock--;
1040 nfsrv_delegatecnt--;
1041}
1042
1043/*
1044 * This function frees an open owner and all associated opens.
1045 */
1046static void
1047nfsrv_freeopenowner(struct nfsstate *stp, int cansleep, NFSPROC_T *p)
1048{
1049 struct nfsstate *nstp, *tstp;
1050
1051 LIST_REMOVE(stp, ls_list);
1052 /*
1053 * Now, free all associated opens.
1054 */
1055 nstp = LIST_FIRST(&stp->ls_open);
1056 while (nstp != LIST_END(&stp->ls_open)) {
1057 tstp = nstp;
1058 nstp = LIST_NEXT(nstp, ls_list);
1059 (void) nfsrv_freeopen(tstp, NULL, cansleep, p);
1060 }
1061 if (stp->ls_op)
1062 nfsrvd_derefcache(stp->ls_op);
1063 FREE((caddr_t)stp, M_NFSDSTATE);
1064 newnfsstats.srvopenowners--;
1065 nfsrv_openpluslock--;
1066}
1067
1068/*
1069 * This function frees an open (nfsstate open structure) with all associated
1070 * lock_owners and locks. It also frees the nfslockfile structure iff there
1071 * are no other opens on the file.
1072 * Returns 1 if it free'd the nfslockfile, 0 otherwise.
1073 */
1074static int
1075nfsrv_freeopen(struct nfsstate *stp, vnode_t vp, int cansleep, NFSPROC_T *p)
1076{
1077 struct nfsstate *nstp, *tstp;
1078 struct nfslockfile *lfp;
1079 int ret;
1080
1081 LIST_REMOVE(stp, ls_hash);
1082 LIST_REMOVE(stp, ls_list);
1083 LIST_REMOVE(stp, ls_file);
1084
1085 lfp = stp->ls_lfp;
1086 /*
1087 * Now, free all lockowners associated with this open.
1088 */
1089 LIST_FOREACH_SAFE(tstp, &stp->ls_open, ls_list, nstp)
1090 nfsrv_freelockowner(tstp, vp, cansleep, p);
1091
1092 /*
1093 * The nfslockfile is freed here if there are no locks
1094 * associated with the open.
1095 * If there are locks associated with the open, the
1096 * nfslockfile structure can be freed via nfsrv_freelockowner().
1097 * Acquire the state mutex to avoid races with calls to
1098 * nfsrv_getlockfile().
1099 */
1100 if (cansleep != 0)
1101 NFSLOCKSTATE();
1102 if (lfp != NULL && LIST_EMPTY(&lfp->lf_open) &&
1103 LIST_EMPTY(&lfp->lf_deleg) && LIST_EMPTY(&lfp->lf_lock) &&
1104 LIST_EMPTY(&lfp->lf_locallock) && LIST_EMPTY(&lfp->lf_rollback) &&
1105 lfp->lf_usecount == 0 &&
1106 (cansleep != 0 || nfsv4_testlock(&lfp->lf_locallock_lck) == 0)) {
1107 nfsrv_freenfslockfile(lfp);
1108 ret = 1;
1109 } else
1110 ret = 0;
1111 if (cansleep != 0)
1112 NFSUNLOCKSTATE();
1113 FREE((caddr_t)stp, M_NFSDSTATE);
1114 newnfsstats.srvopens--;
1115 nfsrv_openpluslock--;
1116 return (ret);
1117}
1118
1119/*
1120 * Frees a lockowner and all associated locks.
1121 */
1122static void
1123nfsrv_freelockowner(struct nfsstate *stp, vnode_t vp, int cansleep,
1124 NFSPROC_T *p)
1125{
1126
1127 LIST_REMOVE(stp, ls_hash);
1128 LIST_REMOVE(stp, ls_list);
1129 nfsrv_freeallnfslocks(stp, vp, cansleep, p);
1130 if (stp->ls_op)
1131 nfsrvd_derefcache(stp->ls_op);
1132 FREE((caddr_t)stp, M_NFSDSTATE);
1133 newnfsstats.srvlockowners--;
1134 nfsrv_openpluslock--;
1135}
1136
1137/*
1138 * Free all the nfs locks on a lockowner.
1139 */
1140static void
1141nfsrv_freeallnfslocks(struct nfsstate *stp, vnode_t vp, int cansleep,
1142 NFSPROC_T *p)
1143{
1144 struct nfslock *lop, *nlop;
1145 struct nfsrollback *rlp, *nrlp;
1146 struct nfslockfile *lfp = NULL;
1147 int gottvp = 0;
1148 vnode_t tvp = NULL;
1149 uint64_t first, end;
1150
1151 lop = LIST_FIRST(&stp->ls_lock);
1152 while (lop != LIST_END(&stp->ls_lock)) {
1153 nlop = LIST_NEXT(lop, lo_lckowner);
1154 /*
1155 * Since all locks should be for the same file, lfp should
1156 * not change.
1157 */
1158 if (lfp == NULL)
1159 lfp = lop->lo_lfp;
1160 else if (lfp != lop->lo_lfp)
1161 panic("allnfslocks");
1162 /*
1163 * If vp is NULL and cansleep != 0, a vnode must be acquired
1164 * from the file handle. This only occurs when called from
1165 * nfsrv_cleanclient().
1166 */
1167 if (gottvp == 0) {
1168 if (nfsrv_dolocallocks == 0)
1169 tvp = NULL;
1170 else if (vp == NULL && cansleep != 0)
1171 tvp = nfsvno_getvp(&lfp->lf_fh);
1172 else
1173 tvp = vp;
1174 gottvp = 1;
1175 }
1176
1177 if (tvp != NULL) {
1178 if (cansleep == 0)
1179 panic("allnfs2");
1180 first = lop->lo_first;
1181 end = lop->lo_end;
1182 nfsrv_freenfslock(lop);
1183 nfsrv_localunlock(tvp, lfp, first, end, p);
1184 LIST_FOREACH_SAFE(rlp, &lfp->lf_rollback, rlck_list,
1185 nrlp)
1186 free(rlp, M_NFSDROLLBACK);
1187 LIST_INIT(&lfp->lf_rollback);
1188 } else
1189 nfsrv_freenfslock(lop);
1190 lop = nlop;
1191 }
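 /* If the vnode was acquired here via the file handle, release it. */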
1192 if (vp == NULL && tvp != NULL)
1193 vput(tvp);
1194}
1195
1196/*
1197 * Free an nfslock structure.
1198 */
1199static void
1200nfsrv_freenfslock(struct nfslock *lop)
1201{
1202
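 /*
 * Only unlink the lock from the per-file list and adjust the lock
 * stats if it was actually linked into that list.
 */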
1203 if (lop->lo_lckfile.le_prev != NULL) {
1204 LIST_REMOVE(lop, lo_lckfile);
1205 newnfsstats.srvlocks--;
1206 nfsrv_openpluslock--;
1207 }
1208 LIST_REMOVE(lop, lo_lckowner);
1209 FREE((caddr_t)lop, M_NFSDLOCK);
1210}
1211
1212/*
1213 * This function frees an nfslockfile structure.
1214 */
1215static void
1216nfsrv_freenfslockfile(struct nfslockfile *lfp)
1217{
1218
1219 LIST_REMOVE(lfp, lf_hash);
1220 FREE((caddr_t)lfp, M_NFSDLOCKFILE);
1221}
1222
1223/*
1224 * This function looks up an nfsstate structure via stateid.
1225 */
1226static int
1227nfsrv_getstate(struct nfsclient *clp, nfsv4stateid_t *stateidp, __unused u_int32_t flags,
1228 struct nfsstate **stpp)
1229{
1230 struct nfsstate *stp;
1231 struct nfsstatehead *hp;
1232
1233 *stpp = NULL;
1234 hp = NFSSTATEHASH(clp, *stateidp);
1235 LIST_FOREACH(stp, hp, ls_hash) {
1236 if (!NFSBCMP(stp->ls_stateid.other, stateidp->other,
1237 NFSX_STATEIDOTHER))
1238 break;
1239 }
1240
1241 /*
1242 * If no state id in list, return NFSERR_BADSTATEID.
1243 */
1244 if (stp == LIST_END(hp))
1245 return (NFSERR_BADSTATEID);
1246 *stpp = stp;
1247 return (0);
1248}
1249
1250/*
1251 * This function gets an nfsstate structure via owner string.
1252 */
1253static void
1254nfsrv_getowner(struct nfsstatehead *hp, struct nfsstate *new_stp,
1255 struct nfsstate **stpp)
1256{
1257 struct nfsstate *stp;
1258
1259 *stpp = NULL;
1260 LIST_FOREACH(stp, hp, ls_list) {
1261 if (new_stp->ls_ownerlen == stp->ls_ownerlen &&
1262 !NFSBCMP(new_stp->ls_owner,stp->ls_owner,stp->ls_ownerlen)) {
1263 *stpp = stp;
1264 return;
1265 }
1266 }
1267}
1268
1269/*
1270 * Lock control function called to update lock status.
1271 * Returns 0 upon success, -1 if there is no lock and the flags indicate
1272 * that one isn't to be created and an NFSERR_xxx for other errors.
1273 * The structures new_stp and new_lop are passed in as pointers that should
1274 * be set to NULL if the structure is used and shouldn't be free'd.
1275 * For the NFSLCK_TEST and NFSLCK_CHECK cases, the structures are
1276 * never used and can safely be allocated on the stack. For all other
1277 * cases, *new_stpp and *new_lopp should be malloc'd before the call,
1278 * in case they are used.
1279 */
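/*
 * Purely illustrative sketch of the calling convention described above.
 * This is not the actual nfsrvd_*() caller code; the setup shown and the
 * fields filled in are assumptions:
 *
 *	MALLOC(new_stp, struct nfsstate *, sizeof (struct nfsstate),
 *	    M_NFSDSTATE, M_WAITOK);
 *	MALLOC(new_lop, struct nfslock *, sizeof (struct nfslock),
 *	    M_NFSDLOCK, M_WAITOK);
 *	... fill in ls_flags, ls_stateid, lo_first, lo_end, lo_flags ...
 *	error = nfsrv_lockctrl(vp, &new_stp, &new_lop, &cf, clientid,
 *	    &stateid, exp, nd, p);
 *	if (new_stp != NULL)
 *		FREE((caddr_t)new_stp, M_NFSDSTATE);
 *	if (new_lop != NULL)
 *		FREE((caddr_t)new_lop, M_NFSDLOCK);
 */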
1280APPLESTATIC int
1281nfsrv_lockctrl(vnode_t vp, struct nfsstate **new_stpp,
1282 struct nfslock **new_lopp, struct nfslockconflict *cfp,
1283 nfsquad_t clientid, nfsv4stateid_t *stateidp,
1284 __unused struct nfsexstuff *exp,
1285 struct nfsrv_descript *nd, NFSPROC_T *p)
1286{
1287 struct nfslock *lop;
1288 struct nfsstate *new_stp = *new_stpp;
1289 struct nfslock *new_lop = *new_lopp;
1290 struct nfsstate *tstp, *mystp, *nstp;
1291 int specialid = 0;
1292 struct nfslockfile *lfp;
1293 struct nfslock *other_lop = NULL;
1294 struct nfsstate *stp, *lckstp = NULL;
1295 struct nfsclient *clp = NULL;
1296 u_int32_t bits;
1297 int error = 0, haslock = 0, ret, reterr;
1298 int getlckret, delegation = 0, filestruct_locked;
1299 fhandle_t nfh;
1300 uint64_t first, end;
1301 uint32_t lock_flags;
1302
1303 if (new_stp->ls_flags & (NFSLCK_CHECK | NFSLCK_SETATTR)) {
1304 /*
1305 * Note the special cases of "all 1s" or "all 0s" stateids and
1306 * let reads with all 1s go ahead.
1307 */
1308 if (new_stp->ls_stateid.seqid == 0x0 &&
1309 new_stp->ls_stateid.other[0] == 0x0 &&
1310 new_stp->ls_stateid.other[1] == 0x0 &&
1311 new_stp->ls_stateid.other[2] == 0x0)
1312 specialid = 1;
1313 else if (new_stp->ls_stateid.seqid == 0xffffffff &&
1314 new_stp->ls_stateid.other[0] == 0xffffffff &&
1315 new_stp->ls_stateid.other[1] == 0xffffffff &&
1316 new_stp->ls_stateid.other[2] == 0xffffffff)
1317 specialid = 2;
1318 }
1319
1320 /*
1321 * Check for restart conditions (client and server).
1322 */
1323 error = nfsrv_checkrestart(clientid, new_stp->ls_flags,
1324 &new_stp->ls_stateid, specialid);
1325 if (error)
1326 return (error);
1327
1328 /*
1329 * Check for state resource limit exceeded.
1330 */
1331 if ((new_stp->ls_flags & NFSLCK_LOCK) &&
1332 nfsrv_openpluslock > NFSRV_V4STATELIMIT)
1333 return (NFSERR_RESOURCE);
1334
1335 /*
1336 * For the lock case, get another nfslock structure,
1337 * just in case we need it.
1338 * Malloc now, before we start sifting through the linked lists,
1339 * in case we have to wait for memory.
1340 */
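 /*
 * tryagain is re-entered after nfsrv_clientconflict(), nfsrv_delegconflict()
 * or nfsrv_cleandeleg() has dropped the state lock, so all of the list
 * lookups below must be redone from scratch.
 */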
1341tryagain:
1342 if (new_stp->ls_flags & NFSLCK_LOCK)
1343 MALLOC(other_lop, struct nfslock *, sizeof (struct nfslock),
1344 M_NFSDLOCK, M_WAITOK);
1345 filestruct_locked = 0;
1346 reterr = 0;
1347 lfp = NULL;
1348
1349 /*
1350 * Get the lockfile structure for CFH now, so we can do a sanity
1351 * check against the stateid, before incrementing the seqid#, since
1352 * we want to return NFSERR_BADSTATEID on failure and the seqid#
1353 * shouldn't be incremented for this case.
1354 * If nfsrv_getlockfile() returns -1, it means "not found", which
1355 * will be handled later.
1356 * If we are doing Lock/LockU and local locking is enabled, sleep
1357 * lock the nfslockfile structure.
1358 */
1359 getlckret = nfsrv_getlockfh(vp, new_stp->ls_flags, NULL, &nfh, p);
1360 NFSLOCKSTATE();
1361 if (getlckret == 0) {
1362 if ((new_stp->ls_flags & (NFSLCK_LOCK | NFSLCK_UNLOCK)) != 0 &&
1363 nfsrv_dolocallocks != 0 && nd->nd_repstat == 0) {
1364 getlckret = nfsrv_getlockfile(new_stp->ls_flags, NULL,
1365 &lfp, &nfh, 1);
1366 if (getlckret == 0)
1367 filestruct_locked = 1;
1368 } else
1369 getlckret = nfsrv_getlockfile(new_stp->ls_flags, NULL,
1370 &lfp, &nfh, 0);
1371 }
1372 if (getlckret != 0 && getlckret != -1)
1373 reterr = getlckret;
1374
1375 if (filestruct_locked != 0) {
1376 LIST_INIT(&lfp->lf_rollback);
1377 if ((new_stp->ls_flags & NFSLCK_LOCK)) {
1378 /*
1379 * For local locking, do the advisory locking now, so
1380 * that any conflict can be detected. A failure later
1381 * can be rolled back locally. If an error is returned,
1382 * struct nfslockfile has been unlocked and any local
1383 * locking rolled back.
1384 */
1385 NFSUNLOCKSTATE();
1386 reterr = nfsrv_locallock(vp, lfp,
1387 (new_lop->lo_flags & (NFSLCK_READ | NFSLCK_WRITE)),
1388 new_lop->lo_first, new_lop->lo_end, cfp, p);
1389 NFSLOCKSTATE();
1390 }
1391 }
1392
1393 if (specialid == 0) {
1394 if (new_stp->ls_flags & NFSLCK_TEST) {
1395 /*
1396 * RFC 3530 does not list LockT as an op that renews a
1397 * lease, but the consensus seems to be that it is ok
1398 * for a server to do so.
1399 */
1400 error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp,
1401 (nfsquad_t)((u_quad_t)0), NULL, p);
1402
1403 /*
1404 * Since NFSERR_EXPIRED and NFSERR_ADMINREVOKED are not valid
1405 * error returns for LockT, just go ahead and test for a lock,
1406 * since there are no locks for this client, but other locks
1407 * can conflict. (ie. same client will always be false)
1408 */
1409 if (error == NFSERR_EXPIRED || error == NFSERR_ADMINREVOKED)
1410 error = 0;
1411 lckstp = new_stp;
1412 } else {
1413 error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp,
1414 (nfsquad_t)((u_quad_t)0), NULL, p);
1415 if (error == 0)
1416 /*
1417 * Look up the stateid
1418 */
1419 error = nfsrv_getstate(clp, &new_stp->ls_stateid,
1420 new_stp->ls_flags, &stp);
1421 /*
1422 * do some sanity checks for an unconfirmed open or a
1423 * stateid that refers to the wrong file, for an open stateid
1424 */
1425 if (error == 0 && (stp->ls_flags & NFSLCK_OPEN) &&
1426 ((stp->ls_openowner->ls_flags & NFSLCK_NEEDSCONFIRM) ||
1427 (getlckret == 0 && stp->ls_lfp != lfp)))
1428 error = NFSERR_BADSTATEID;
1429 if (error == 0 &&
1430 (stp->ls_flags & (NFSLCK_DELEGREAD | NFSLCK_DELEGWRITE)) &&
1431 getlckret == 0 && stp->ls_lfp != lfp)
1432 error = NFSERR_BADSTATEID;
1433
1434 /*
1435 * If the lockowner stateid doesn't refer to the same file,
1436 * I believe that is considered ok, since some clients will
1437 * only create a single lockowner and use that for all locks
1438 * on all files.
1439 * For now, log it as a diagnostic, instead of considering it
1440 * a BadStateid.
1441 */
1442 if (error == 0 && (stp->ls_flags &
1443 (NFSLCK_OPEN | NFSLCK_DELEGREAD | NFSLCK_DELEGWRITE)) == 0 &&
1444 getlckret == 0 && stp->ls_lfp != lfp) {
1445#ifdef DIAGNOSTIC
1446 printf("Got a lock stateid for different file open\n");
1447#endif
1448 /*
1449 error = NFSERR_BADSTATEID;
1450 */
1451 }
1452
1453 if (error == 0) {
1454 if (new_stp->ls_flags & NFSLCK_OPENTOLOCK) {
1455 /*
1456 * If haslock set, we've already checked the seqid.
1457 */
1458 if (!haslock) {
1459 if (stp->ls_flags & NFSLCK_OPEN)
1460 error = nfsrv_checkseqid(nd, new_stp->ls_seq,
1461 stp->ls_openowner, new_stp->ls_op);
1462 else
1463 error = NFSERR_BADSTATEID;
1464 }
1465 if (!error)
1466 nfsrv_getowner(&stp->ls_open, new_stp, &lckstp);
1467 if (lckstp)
1468 /*
1469 * I believe this should be an error, but it
1470 * isn't obvious what NFSERR_xxx would be
1471 * appropriate, so I'll use NFSERR_INVAL for now.
1472 */
1473 error = NFSERR_INVAL;
1474 else
1475 lckstp = new_stp;
1476 } else if (new_stp->ls_flags&(NFSLCK_LOCK|NFSLCK_UNLOCK)) {
1477 /*
1478 * If haslock set, ditto above.
1479 */
1480 if (!haslock) {
1481 if (stp->ls_flags & NFSLCK_OPEN)
1482 error = NFSERR_BADSTATEID;
1483 else
1484 error = nfsrv_checkseqid(nd, new_stp->ls_seq,
1485 stp, new_stp->ls_op);
1486 }
1487 lckstp = stp;
1488 } else {
1489 lckstp = stp;
1490 }
1491 }
1492 /*
1493 * If the seqid part of the stateid isn't the same, return
1494 * NFSERR_OLDSTATEID for cases other than I/O Ops.
1495 * For I/O Ops, only return NFSERR_OLDSTATEID if
1496 * nfsrv_returnoldstateid is set. (The consensus on the email
1497 * list was that most clients would prefer to not receive
1498 * NFSERR_OLDSTATEID for I/O Ops, but the RFC suggests that that
1499 * is what will happen, so I use the nfsrv_returnoldstateid to
1500 * allow for either server configuration.)
1501 */
1502 if (!error && stp->ls_stateid.seqid!=new_stp->ls_stateid.seqid &&
1503 (!(new_stp->ls_flags & NFSLCK_CHECK) ||
1504 nfsrv_returnoldstateid))
1505 error = NFSERR_OLDSTATEID;
1506 }
1507 }
1508
1509 /*
1510 * Now we can check for grace.
1511 */
1512 if (!error)
1513 error = nfsrv_checkgrace(new_stp->ls_flags);
1514 if ((new_stp->ls_flags & NFSLCK_RECLAIM) && !error &&
1515 nfsrv_checkstable(clp))
1516 error = NFSERR_NOGRACE;
1517 /*
1518 * If we successfully Reclaimed state, note that.
1519 */
1520 if ((new_stp->ls_flags & NFSLCK_RECLAIM) && !error)
1521 nfsrv_markstable(clp);
1522
1523 /*
1524 * At this point, either error == NFSERR_BADSTATEID or the
1525 * seqid# has been updated, so we can return any error.
1526 * If error == 0, there may be an error in:
1527 * nd_repstat - Set by the calling function.
1528 * reterr - Set above, if getting the nfslockfile structure
1529 * or acquiring the local lock failed.
1530 * (If both of these are set, nd_repstat should probably be
1531 * returned, since that error was detected before this
1532 * function call.)
1533 */
1534 if (error != 0 || nd->nd_repstat != 0 || reterr != 0) {
1535 if (error == 0) {
1536 if (nd->nd_repstat != 0)
1537 error = nd->nd_repstat;
1538 else
1539 error = reterr;
1540 }
1541 if (filestruct_locked != 0) {
1542 /* Roll back local locks. */
1543 NFSUNLOCKSTATE();
1544 nfsrv_locallock_rollback(vp, lfp, p);
1545 NFSLOCKSTATE();
1546 nfsrv_unlocklf(lfp);
1547 }
1548 NFSUNLOCKSTATE();
1549 if (other_lop)
1550 FREE((caddr_t)other_lop, M_NFSDLOCK);
1551 if (haslock) {
1552 NFSLOCKV4ROOTMUTEX();
1553 nfsv4_unlock(&nfsv4rootfs_lock, 1);
1554 NFSUNLOCKV4ROOTMUTEX();
1555 }
1556 return (error);
1557 }
1558
1559 /*
1560 * Check the nfsrv_getlockfile return.
1561 * Returned -1 if no structure found.
1562 */
1563 if (getlckret == -1) {
1564 error = NFSERR_EXPIRED;
1565 /*
1566 * Called from lockt, so no lock is OK.
1567 */
1568 if (new_stp->ls_flags & NFSLCK_TEST) {
1569 error = 0;
1570 } else if (new_stp->ls_flags &
1571 (NFSLCK_CHECK | NFSLCK_SETATTR)) {
1572 /*
1573 * Called to check for a lock, OK if the stateid is all
1574 * 1s or all 0s, but there should be an nfsstate
1575 * otherwise.
1576 * (ie. If there is no open, I'll assume no share
1577 * deny bits.)
1578 */
1579 if (specialid)
1580 error = 0;
1581 else
1582 error = NFSERR_BADSTATEID;
1583 }
1584 NFSUNLOCKSTATE();
1585 if (haslock) {
1586 NFSLOCKV4ROOTMUTEX();
1587 nfsv4_unlock(&nfsv4rootfs_lock, 1);
1588 NFSUNLOCKV4ROOTMUTEX();
1589 }
1590 /*
1591 * Called to lock or unlock, so the lock has gone away.
1592 */
1593 return (error);
1594 }
1595
1596 /*
1597 * For NFSLCK_CHECK and NFSLCK_LOCK, test for a share conflict.
1598 * For NFSLCK_CHECK, allow a read if write access is granted,
1599 * but check for a deny. For NFSLCK_LOCK, require correct access,
1600 * which implies a conflicting deny can't exist.
1601 */
1602 if (new_stp->ls_flags & (NFSLCK_CHECK | NFSLCK_LOCK)) {
1603 /*
1604 * Four kinds of state id:
1605 * - specialid (all 0s or all 1s), only for NFSLCK_CHECK
1606 * - stateid for an open
1607 * - stateid for a delegation
1608 * - stateid for a lock owner
1609 */
1610 if (!specialid) {
1611 if (stp->ls_flags & (NFSLCK_DELEGREAD | NFSLCK_DELEGWRITE)) {
1612 delegation = 1;
1613 mystp = stp;
1614 nfsrv_delaydelegtimeout(stp);
1615 } else if (stp->ls_flags & NFSLCK_OPEN) {
1616 mystp = stp;
1617 } else {
1618 mystp = stp->ls_openstp;
1619 }
1620 /*
1621 * If locking or checking, require correct access
1622 * bit set.
1623 */
1624 if (((new_stp->ls_flags & NFSLCK_LOCK) &&
1625 !((new_lop->lo_flags >> NFSLCK_LOCKSHIFT) &
1626 mystp->ls_flags & NFSLCK_ACCESSBITS)) ||
1627 ((new_stp->ls_flags & (NFSLCK_CHECK|NFSLCK_READACCESS)) ==
1628 (NFSLCK_CHECK | NFSLCK_READACCESS) &&
1629 !(mystp->ls_flags & NFSLCK_READACCESS)) ||
1630 ((new_stp->ls_flags & (NFSLCK_CHECK|NFSLCK_WRITEACCESS)) ==
1631 (NFSLCK_CHECK | NFSLCK_WRITEACCESS) &&
1632 !(mystp->ls_flags & NFSLCK_WRITEACCESS))) {
1633 if (filestruct_locked != 0) {
1634 /* Roll back local locks. */
1635 NFSUNLOCKSTATE();
1636 nfsrv_locallock_rollback(vp, lfp, p);
1637 NFSLOCKSTATE();
1638 nfsrv_unlocklf(lfp);
1639 }
1640 NFSUNLOCKSTATE();
1641 if (other_lop)
1642 FREE((caddr_t)other_lop, M_NFSDLOCK);
1643 if (haslock) {
1644 NFSLOCKV4ROOTMUTEX();
1645 nfsv4_unlock(&nfsv4rootfs_lock, 1);
1646 NFSUNLOCKV4ROOTMUTEX();
1647 }
1648 return (NFSERR_OPENMODE);
1649 }
1650 } else
1651 mystp = NULL;
1652 if ((new_stp->ls_flags & NFSLCK_CHECK) && !delegation) {
1653 /*
1654 * Check for a conflicting deny bit.
1655 */
1656 LIST_FOREACH(tstp, &lfp->lf_open, ls_file) {
1657 if (tstp != mystp) {
1658 bits = tstp->ls_flags;
1659 bits >>= NFSLCK_SHIFT;
1660 if (new_stp->ls_flags & bits & NFSLCK_ACCESSBITS) {
1661 ret = nfsrv_clientconflict(tstp->ls_clp, &haslock,
1662 vp, p);
1663 if (ret == 1) {
1664 /*
1665 * nfsrv_clientconflict unlocks state
1666 * when it returns non-zero.
1667 */
1668 lckstp = NULL;
1669 goto tryagain;
1670 }
1671 if (ret == 0)
1672 NFSUNLOCKSTATE();
1673 if (haslock) {
1674 NFSLOCKV4ROOTMUTEX();
1675 nfsv4_unlock(&nfsv4rootfs_lock, 1);
1676 NFSUNLOCKV4ROOTMUTEX();
1677 }
1678 if (ret == 2)
1679 return (NFSERR_PERM);
1680 else
1681 return (NFSERR_OPENMODE);
1682 }
1683 }
1684 }
1685
1686 /* We're outta here */
1687 NFSUNLOCKSTATE();
1688 if (haslock) {
1689 NFSLOCKV4ROOTMUTEX();
1690 nfsv4_unlock(&nfsv4rootfs_lock, 1);
1691 NFSUNLOCKV4ROOTMUTEX();
1692 }
1693 return (0);
1694 }
1695 }
1696
1697 /*
1698 * For setattr, just get rid of all the Delegations for other clients.
1699 */
1700 if (new_stp->ls_flags & NFSLCK_SETATTR) {
1701 ret = nfsrv_cleandeleg(vp, lfp, clp, &haslock, p);
1702 if (ret) {
1703 /*
1704 * nfsrv_cleandeleg() unlocks state when it
1705 * returns non-zero.
1706 */
1707 if (ret == -1) {
1708 lckstp = NULL;
1709 goto tryagain;
1710 }
1711 return (ret);
1712 }
1713 if (!(new_stp->ls_flags & NFSLCK_CHECK) ||
1714 (LIST_EMPTY(&lfp->lf_open) && LIST_EMPTY(&lfp->lf_lock) &&
1715 LIST_EMPTY(&lfp->lf_deleg))) {
1716 NFSUNLOCKSTATE();
1717 if (haslock) {
1718 NFSLOCKV4ROOTMUTEX();
1719 nfsv4_unlock(&nfsv4rootfs_lock, 1);
1720 NFSUNLOCKV4ROOTMUTEX();
1721 }
1722 return (0);
1723 }
1724 }
1725
1726 /*
1727 * Check for a conflicting delegation. If one is found, call
1728 * nfsrv_delegconflict() to handle it. If the v4root lock hasn't
1729 * been set yet, it will get the lock. Otherwise, it will recall
1730 * the delegation. Then, we try again...
1731 * I currently believe the conflict algorithm to be:
1732 * For Lock Ops (Lock/LockT/LockU)
1733 * - there is a conflict iff a different client has a write delegation
1734 * For Reading (Read Op)
1735 * - there is a conflict iff a different client has a write delegation
1736 * (the specialids are always a different client)
1737 * For Writing (Write/Setattr of size)
1738 * - there is a conflict if a different client has any delegation
1739 * - there is a conflict if the same client has a read delegation
1740 * (I don't understand why this isn't allowed, but that seems to be
1741 * the current consensus?)
1742 */
1743 tstp = LIST_FIRST(&lfp->lf_deleg);
1744 while (tstp != LIST_END(&lfp->lf_deleg)) {
1745 nstp = LIST_NEXT(tstp, ls_file);
1746 if ((((new_stp->ls_flags&(NFSLCK_LOCK|NFSLCK_UNLOCK|NFSLCK_TEST))||
1747 ((new_stp->ls_flags & NFSLCK_CHECK) &&
1748 (new_lop->lo_flags & NFSLCK_READ))) &&
1749 clp != tstp->ls_clp &&
1750 (tstp->ls_flags & NFSLCK_DELEGWRITE)) ||
1751 ((new_stp->ls_flags & NFSLCK_CHECK) &&
1752 (new_lop->lo_flags & NFSLCK_WRITE) &&
1753 (clp != tstp->ls_clp ||
1754 (tstp->ls_flags & NFSLCK_DELEGREAD)))) {
1755 if (filestruct_locked != 0) {
1756 /* Roll back local locks. */
1757 NFSUNLOCKSTATE();
1758 nfsrv_locallock_rollback(vp, lfp, p);
1759 NFSLOCKSTATE();
1760 nfsrv_unlocklf(lfp);
1761 }
1762 ret = nfsrv_delegconflict(tstp, &haslock, p, vp);
1763 if (ret) {
1764 /*
1765 * nfsrv_delegconflict unlocks state when it
1766 * returns non-zero, which it always does.
1767 */
1768 if (other_lop) {
1769 FREE((caddr_t)other_lop, M_NFSDLOCK);
1770 other_lop = NULL;
1771 }
1772 if (ret == -1) {
1773 lckstp = NULL;
1774 goto tryagain;
1775 }
1776 return (ret);
1777 }
1778 /* Never gets here. */
1779 }
1780 tstp = nstp;
1781 }
1782
1783 /*
1784 * Handle the unlock case by calling nfsrv_updatelock().
1785 * (Should I have done some access checking above for unlock? For now,
1786 * just let it happen.)
1787 */
1788 if (new_stp->ls_flags & NFSLCK_UNLOCK) {
1789 first = new_lop->lo_first;
1790 end = new_lop->lo_end;
1791 nfsrv_updatelock(stp, new_lopp, &other_lop, lfp);
1792 stateidp->seqid = ++(stp->ls_stateid.seqid);
1793 stateidp->other[0] = stp->ls_stateid.other[0];
1794 stateidp->other[1] = stp->ls_stateid.other[1];
1795 stateidp->other[2] = stp->ls_stateid.other[2];
1796 if (filestruct_locked != 0) {
1797 NFSUNLOCKSTATE();
1798 /* Update the local locks. */
1799 nfsrv_localunlock(vp, lfp, first, end, p);
1800 NFSLOCKSTATE();
1801 nfsrv_unlocklf(lfp);
1802 }
1803 NFSUNLOCKSTATE();
1804 if (haslock) {
1805 NFSLOCKV4ROOTMUTEX();
1806 nfsv4_unlock(&nfsv4rootfs_lock, 1);
1807 NFSUNLOCKV4ROOTMUTEX();
1808 }
1809 return (0);
1810 }
1811
1812 /*
1813 * Search for a conflicting lock. A lock conflicts if:
1814 * - the lock range overlaps and
1815 * - at least one lock is a write lock and
1816 * - it is not owned by the same lock owner
1817 */
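	/*
	 * For example, an existing write lock on bytes 0-99 held by one
	 * lock owner conflicts with a new read or write lock on bytes
	 * 50-149 requested by a different lock owner, since the ranges
	 * overlap and at least one of the two locks is a write lock.
	 */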
1818 if (!delegation) {
1819 LIST_FOREACH(lop, &lfp->lf_lock, lo_lckfile) {
1820 if (new_lop->lo_end > lop->lo_first &&
1821 new_lop->lo_first < lop->lo_end &&
1822 (new_lop->lo_flags == NFSLCK_WRITE ||
1823 lop->lo_flags == NFSLCK_WRITE) &&
1824 lckstp != lop->lo_stp &&
1825 (clp != lop->lo_stp->ls_clp ||
1826 lckstp->ls_ownerlen != lop->lo_stp->ls_ownerlen ||
1827 NFSBCMP(lckstp->ls_owner, lop->lo_stp->ls_owner,
1828 lckstp->ls_ownerlen))) {
1829 if (other_lop) {
1830 FREE((caddr_t)other_lop, M_NFSDLOCK);
1831 other_lop = NULL;
1832 }
1833 ret = nfsrv_clientconflict(lop->lo_stp->ls_clp,&haslock,vp,p);
1834 if (ret == 1) {
1835 if (filestruct_locked != 0) {
1836 /* Roll back local locks. */
1837 nfsrv_locallock_rollback(vp, lfp, p);
1838 NFSLOCKSTATE();
1839 nfsrv_unlocklf(lfp);
1840 NFSUNLOCKSTATE();
1841 }
1842 /*
1843 * nfsrv_clientconflict() unlocks state when it
1844 * returns non-zero.
1845 */
1846 lckstp = NULL;
1847 goto tryagain;
1848 }
1849 /*
1850 * Found a conflicting lock, so record the conflict and
1851 * return the error.
1852 */
1853 if (cfp != NULL && ret == 0) {
1854 cfp->cl_clientid.lval[0]=lop->lo_stp->ls_stateid.other[0];
1855 cfp->cl_clientid.lval[1]=lop->lo_stp->ls_stateid.other[1];
1856 cfp->cl_first = lop->lo_first;
1857 cfp->cl_end = lop->lo_end;
1858 cfp->cl_flags = lop->lo_flags;
1859 cfp->cl_ownerlen = lop->lo_stp->ls_ownerlen;
1860 NFSBCOPY(lop->lo_stp->ls_owner, cfp->cl_owner,
1861 cfp->cl_ownerlen);
1862 }
1863 if (ret == 2)
1864 error = NFSERR_PERM;
1865 else if (new_stp->ls_flags & NFSLCK_RECLAIM)
1866 error = NFSERR_RECLAIMCONFLICT;
1867 else if (new_stp->ls_flags & NFSLCK_CHECK)
1868 error = NFSERR_LOCKED;
1869 else
1870 error = NFSERR_DENIED;
1871 if (filestruct_locked != 0 && ret == 0) {
1872 /* Roll back local locks. */
1873 NFSUNLOCKSTATE();
1874 nfsrv_locallock_rollback(vp, lfp, p);
1875 NFSLOCKSTATE();
1876 nfsrv_unlocklf(lfp);
1877 }
1878 if (ret == 0)
1879 NFSUNLOCKSTATE();
1880 if (haslock) {
1881 NFSLOCKV4ROOTMUTEX();
1882 nfsv4_unlock(&nfsv4rootfs_lock, 1);
1883 NFSUNLOCKV4ROOTMUTEX();
1884 }
1885 return (error);
1886 }
1887 }
1888 }
1889
1890 /*
1891 * We only get here if there was no lock that conflicted.
1892 */
1893 if (new_stp->ls_flags & (NFSLCK_TEST | NFSLCK_CHECK)) {
1894 NFSUNLOCKSTATE();
1895 if (haslock) {
1896 NFSLOCKV4ROOTMUTEX();
1897 nfsv4_unlock(&nfsv4rootfs_lock, 1);
1898 NFSUNLOCKV4ROOTMUTEX();
1899 }
1900 return (0);
1901 }
1902
1903 /*
1904 * We only get here when we are creating or modifying a lock.
1905 * There are two variants:
1906 * - exist_lock_owner where lock_owner exists
1907 * - open_to_lock_owner with new lock_owner
1908 */
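	/*
	 * For exist_lock_owner the new range is merged into that lock
	 * owner's list by nfsrv_updatelock() and its lock stateid seqid is
	 * bumped; for open_to_lock_owner a fresh lock stateid is built
	 * from the clientid plus nfsrv_nextstateindex() and the new
	 * nfsstate is linked off the open below.
	 */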
1909 first = new_lop->lo_first;
1910 end = new_lop->lo_end;
1911 lock_flags = new_lop->lo_flags;
1912 if (!(new_stp->ls_flags & NFSLCK_OPENTOLOCK)) {
1913 nfsrv_updatelock(lckstp, new_lopp, &other_lop, lfp);
1914 stateidp->seqid = ++(lckstp->ls_stateid.seqid);
1915 stateidp->other[0] = lckstp->ls_stateid.other[0];
1916 stateidp->other[1] = lckstp->ls_stateid.other[1];
1917 stateidp->other[2] = lckstp->ls_stateid.other[2];
1918 } else {
1919 /*
1920 * The new open_to_lock_owner case.
1921 * Link the new nfsstate into the lists.
1922 */
1923 new_stp->ls_seq = new_stp->ls_opentolockseq;
1924 nfsrvd_refcache(new_stp->ls_op);
1925 stateidp->seqid = new_stp->ls_stateid.seqid = 1;
1926 stateidp->other[0] = new_stp->ls_stateid.other[0] =
1927 clp->lc_clientid.lval[0];
1928 stateidp->other[1] = new_stp->ls_stateid.other[1] =
1929 clp->lc_clientid.lval[1];
1930 stateidp->other[2] = new_stp->ls_stateid.other[2] =
1931 nfsrv_nextstateindex(clp);
1932 new_stp->ls_clp = clp;
1933 LIST_INIT(&new_stp->ls_lock);
1934 new_stp->ls_openstp = stp;
1935 new_stp->ls_lfp = lfp;
1936 nfsrv_insertlock(new_lop, (struct nfslock *)new_stp, new_stp,
1937 lfp);
1938 LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_stp->ls_stateid),
1939 new_stp, ls_hash);
1940 LIST_INSERT_HEAD(&stp->ls_open, new_stp, ls_list);
1941 *new_lopp = NULL;
1942 *new_stpp = NULL;
1943 newnfsstats.srvlockowners++;
1944 nfsrv_openpluslock++;
1945 }
1946 if (filestruct_locked != 0) {
1947 NFSUNLOCKSTATE();
1948 nfsrv_locallock_commit(lfp, lock_flags, first, end);
1949 NFSLOCKSTATE();
1950 nfsrv_unlocklf(lfp);
1951 }
1952 NFSUNLOCKSTATE();
1953 if (haslock) {
1954 NFSLOCKV4ROOTMUTEX();
1955 nfsv4_unlock(&nfsv4rootfs_lock, 1);
1956 NFSUNLOCKV4ROOTMUTEX();
1957 }
1958 if (other_lop)
1959 FREE((caddr_t)other_lop, M_NFSDLOCK);
1960 return (0);
1961}
1962
1963/*
1964 * Check for state errors for Open.
1965 * repstat is passed back out as an error if more critical errors
1966 * are not detected.
1967 */
1968APPLESTATIC int
1969nfsrv_opencheck(nfsquad_t clientid, nfsv4stateid_t *stateidp,
1970 struct nfsstate *new_stp, vnode_t vp, struct nfsrv_descript *nd,
1971 NFSPROC_T *p, int repstat)
1972{
1973 struct nfsstate *stp, *nstp;
1974 struct nfsclient *clp;
1975 struct nfsstate *ownerstp;
1976 struct nfslockfile *lfp, *new_lfp;
1977 int error, haslock = 0, ret, readonly = 0, getfhret = 0;
1978
1979 if ((new_stp->ls_flags & NFSLCK_SHAREBITS) == NFSLCK_READACCESS)
1980 readonly = 1;
1981 /*
1982 * Check for restart conditions (client and server).
1983 */
1984 error = nfsrv_checkrestart(clientid, new_stp->ls_flags,
1985 &new_stp->ls_stateid, 0);
1986 if (error)
1987 return (error);
1988
1989 /*
1990 * Check for state resource limit exceeded.
1991 * Technically this should be SMP protected, but the worst
1992 * case error is "out by one or two" on the count when it
1993 * returns NFSERR_RESOURCE and the limit is just a rather
1994 * arbitrary high water mark, so no harm is done.
1995 */
1996 if (nfsrv_openpluslock > NFSRV_V4STATELIMIT)
1997 return (NFSERR_RESOURCE);
1998
1999tryagain:
2000 MALLOC(new_lfp, struct nfslockfile *, sizeof (struct nfslockfile),
2001 M_NFSDLOCKFILE, M_WAITOK);
2002 if (vp)
2003 getfhret = nfsrv_getlockfh(vp, new_stp->ls_flags, &new_lfp,
2004 NULL, p);
2005 NFSLOCKSTATE();
2006 /*
2007 * Get the nfsclient structure.
2008 */
2009 error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp,
2010 (nfsquad_t)((u_quad_t)0), NULL, p);
2011
2012 /*
2013 * Look up the open owner. See if it needs confirmation and
2014 * check the seq#, as required.
2015 */
2016 if (!error)
2017 nfsrv_getowner(&clp->lc_open, new_stp, &ownerstp);
2018
2019 if (!error && ownerstp) {
2020 error = nfsrv_checkseqid(nd, new_stp->ls_seq, ownerstp,
2021 new_stp->ls_op);
2022 /*
2023 * If the OpenOwner hasn't been confirmed, assume the
2024 * old one was a replay and this one is ok.
2025 * See: RFC3530 Sec. 14.2.18.
2026 */
2027 if (error == NFSERR_BADSEQID &&
2028 (ownerstp->ls_flags & NFSLCK_NEEDSCONFIRM))
2029 error = 0;
2030 }
2031
2032 /*
2033 * Check for grace.
2034 */
2035 if (!error)
2036 error = nfsrv_checkgrace(new_stp->ls_flags);
2037 if ((new_stp->ls_flags & NFSLCK_RECLAIM) && !error &&
2038 nfsrv_checkstable(clp))
2039 error = NFSERR_NOGRACE;
2040
2041 /*
2042 * If none of the above errors occurred, let repstat be
2043 * returned.
2044 */
2045 if (repstat && !error)
2046 error = repstat;
2047 if (error) {
2048 NFSUNLOCKSTATE();
2049 if (haslock) {
2050 NFSLOCKV4ROOTMUTEX();
2051 nfsv4_unlock(&nfsv4rootfs_lock, 1);
2052 NFSUNLOCKV4ROOTMUTEX();
2053 }
2054 free((caddr_t)new_lfp, M_NFSDLOCKFILE);
2055 return (error);
2056 }
2057
2058 /*
2059 * If vp == NULL, the file doesn't exist yet, so return ok.
2060 * (This always happens on the first pass, so haslock must be 0.)
2061 */
2062 if (vp == NULL) {
2063 NFSUNLOCKSTATE();
2064 FREE((caddr_t)new_lfp, M_NFSDLOCKFILE);
2065 return (0);
2066 }
2067
2068 /*
2069 * Get the structure for the underlying file.
2070 */
2071 if (getfhret)
2072 error = getfhret;
2073 else
2074 error = nfsrv_getlockfile(new_stp->ls_flags, &new_lfp, &lfp,
2075 NULL, 0);
2076 if (new_lfp)
2077 FREE((caddr_t)new_lfp, M_NFSDLOCKFILE);
2078 if (error) {
2079 NFSUNLOCKSTATE();
2080 if (haslock) {
2081 NFSLOCKV4ROOTMUTEX();
2082 nfsv4_unlock(&nfsv4rootfs_lock, 1);
2083 NFSUNLOCKV4ROOTMUTEX();
2084 }
2085 return (error);
2086 }
2087
2088 /*
2089 * Search for a conflicting open/share.
2090 */
2091 if (new_stp->ls_flags & NFSLCK_DELEGCUR) {
2092 /*
2093 * For Delegate_Cur, search for the matching Delegation,
2094 * which indicates no conflict.
2095 * An old delegation should have been recovered by the
2096 * client doing a Claim_DELEGATE_Prev, so I won't let
2097	 * it match; NFSERR_EXPIRED is returned instead. Should I let it
2098 * match?
2099 */
2100 LIST_FOREACH(stp, &lfp->lf_deleg, ls_file) {
2101 if (!(stp->ls_flags & NFSLCK_OLDDELEG) &&
2102 stateidp->seqid == stp->ls_stateid.seqid &&
2103 !NFSBCMP(stateidp->other, stp->ls_stateid.other,
2104 NFSX_STATEIDOTHER))
2105 break;
2106 }
2107 if (stp == LIST_END(&lfp->lf_deleg) ||
2108 ((new_stp->ls_flags & NFSLCK_WRITEACCESS) &&
2109 (stp->ls_flags & NFSLCK_DELEGREAD))) {
2110 NFSUNLOCKSTATE();
2111 if (haslock) {
2112 NFSLOCKV4ROOTMUTEX();
2113 nfsv4_unlock(&nfsv4rootfs_lock, 1);
2114 NFSUNLOCKV4ROOTMUTEX();
2115 }
2116 return (NFSERR_EXPIRED);
2117 }
2118 }
2119
2120 /*
2121 * Check for access/deny bit conflicts. I check for the same
2122 * owner as well, in case the client didn't bother.
2123 */
2124 LIST_FOREACH(stp, &lfp->lf_open, ls_file) {
2125 if (!(new_stp->ls_flags & NFSLCK_DELEGCUR) &&
2126 (((new_stp->ls_flags & NFSLCK_ACCESSBITS) &
2127 ((stp->ls_flags>>NFSLCK_SHIFT) & NFSLCK_ACCESSBITS))||
2128 ((stp->ls_flags & NFSLCK_ACCESSBITS) &
2129 ((new_stp->ls_flags>>NFSLCK_SHIFT)&NFSLCK_ACCESSBITS)))){
2130 ret = nfsrv_clientconflict(stp->ls_clp,&haslock,vp,p);
2131 if (ret == 1) {
2132 /*
2133 * nfsrv_clientconflict() unlocks
2134 * state when it returns non-zero.
2135 */
2136 goto tryagain;
2137 }
2138 if (ret == 2)
2139 error = NFSERR_PERM;
2140 else if (new_stp->ls_flags & NFSLCK_RECLAIM)
2141 error = NFSERR_RECLAIMCONFLICT;
2142 else
2143 error = NFSERR_SHAREDENIED;
2144 if (ret == 0)
2145 NFSUNLOCKSTATE();
2146 if (haslock) {
2147 NFSLOCKV4ROOTMUTEX();
2148 nfsv4_unlock(&nfsv4rootfs_lock, 1);
2149 NFSUNLOCKV4ROOTMUTEX();
2150 }
2151 return (error);
2152 }
2153 }
2154
2155 /*
2156 * Check for a conflicting delegation. If one is found, call
2157 * nfsrv_delegconflict() to handle it. If the v4root lock hasn't
2158 * been set yet, it will get the lock. Otherwise, it will recall
2159	 * the delegation. Then, we try again...
2160 * (If NFSLCK_DELEGCUR is set, it has a delegation, so there
2161 * isn't a conflict.)
2162 * I currently believe the conflict algorithm to be:
2163 * For Open with Read Access and Deny None
2164 * - there is a conflict iff a different client has a write delegation
2165 * For Open with other Write Access or any Deny except None
2166 * - there is a conflict if a different client has any delegation
2167 * - there is a conflict if the same client has a read delegation
2168	 * (The current consensus is that this last case should be
2169 * considered a conflict since the client with a read delegation
2170 * could have done an Open with ReadAccess and WriteDeny
2171 * locally and then not have checked for the WriteDeny.)
2172 * Don't check for a Reclaim, since that will be dealt with
2173 * by nfsrv_openctrl().
2174 */
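	/*
	 * For example, an Open with ReadAccess and DenyNone only conflicts
	 * with a write delegation held by some other client, whereas an
	 * Open with WriteAccess (or any Deny) conflicts with any delegation
	 * held by another client and with this client's own read delegation.
	 */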
2175 if (!(new_stp->ls_flags &
2176 (NFSLCK_DELEGPREV | NFSLCK_DELEGCUR | NFSLCK_RECLAIM))) {
2177 stp = LIST_FIRST(&lfp->lf_deleg);
2178 while (stp != LIST_END(&lfp->lf_deleg)) {
2179 nstp = LIST_NEXT(stp, ls_file);
2180 if ((readonly && stp->ls_clp != clp &&
2181 (stp->ls_flags & NFSLCK_DELEGWRITE)) ||
2182 (!readonly && (stp->ls_clp != clp ||
2183 (stp->ls_flags & NFSLCK_DELEGREAD)))) {
2184 ret = nfsrv_delegconflict(stp, &haslock, p, vp);
2185 if (ret) {
2186 /*
2187 * nfsrv_delegconflict() unlocks state
2188 * when it returns non-zero.
2189 */
2190 if (ret == -1)
2191 goto tryagain;
2192 return (ret);
2193 }
2194 }
2195 stp = nstp;
2196 }
2197 }
2198 NFSUNLOCKSTATE();
2199 if (haslock) {
2200 NFSLOCKV4ROOTMUTEX();
2201 nfsv4_unlock(&nfsv4rootfs_lock, 1);
2202 NFSUNLOCKV4ROOTMUTEX();
2203 }
2204 return (0);
2205}
2206
2207/*
2208 * Open control function to create/update open state for an open.
2209 */
2210APPLESTATIC int
2211nfsrv_openctrl(struct nfsrv_descript *nd, vnode_t vp,
2212 struct nfsstate **new_stpp, nfsquad_t clientid, nfsv4stateid_t *stateidp,
2213 nfsv4stateid_t *delegstateidp, u_int32_t *rflagsp, struct nfsexstuff *exp,
2214 NFSPROC_T *p, u_quad_t filerev)
2215{
2216 struct nfsstate *new_stp = *new_stpp;
2217 struct nfsstate *stp, *nstp;
2218 struct nfsstate *openstp = NULL, *new_open, *ownerstp, *new_deleg;
2219 struct nfslockfile *lfp, *new_lfp;
2220 struct nfsclient *clp;
2221 int error, haslock = 0, ret, delegate = 1, writedeleg = 1;
2222 int readonly = 0, cbret = 1, getfhret = 0;
2223
2224 if ((new_stp->ls_flags & NFSLCK_SHAREBITS) == NFSLCK_READACCESS)
2225 readonly = 1;
2226 /*
2227 * Check for restart conditions (client and server).
2228 * (Paranoia, should have been detected by nfsrv_opencheck().)
2229 * If an error does show up, return NFSERR_EXPIRED, since the
2230	 * seqid# has already been incremented.
2231 */
2232 error = nfsrv_checkrestart(clientid, new_stp->ls_flags,
2233 &new_stp->ls_stateid, 0);
2234 if (error) {
2235 printf("Nfsd: openctrl unexpected restart err=%d\n",
2236 error);
2237 return (NFSERR_EXPIRED);
2238 }
2239
2240tryagain:
2241 MALLOC(new_lfp, struct nfslockfile *, sizeof (struct nfslockfile),
2242 M_NFSDLOCKFILE, M_WAITOK);
2243 MALLOC(new_open, struct nfsstate *, sizeof (struct nfsstate),
2244 M_NFSDSTATE, M_WAITOK);
2245 MALLOC(new_deleg, struct nfsstate *, sizeof (struct nfsstate),
2246 M_NFSDSTATE, M_WAITOK);
2247 getfhret = nfsrv_getlockfh(vp, new_stp->ls_flags, &new_lfp,
2248 NULL, p);
2249 NFSLOCKSTATE();
2250 /*
2251 * Get the client structure. Since the linked lists could be changed
2252 * by other nfsd processes if this process does a tsleep(), one of
2253 * two things must be done.
2254 * 1 - don't tsleep()
2255 * or
2256 * 2 - get the nfsv4_lock() { indicated by haslock == 1 }
2257 * before using the lists, since this lock stops the other
2258 * nfsd. This should only be used for rare cases, since it
2259 * essentially single threads the nfsd.
2260 * At this time, it is only done for cases where the stable
2261 * storage file must be written prior to completion of state
2262 * expiration.
2263 */
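	/*
	 * In the code below, haslock != 0 means the nfsv4rootfs_lock was
	 * acquired on this thread's behalf by nfsrv_clientconflict() or
	 * nfsrv_delegconflict() and must be released with nfsv4_unlock()
	 * before returning.
	 */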
2264 error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp,
2265 (nfsquad_t)((u_quad_t)0), NULL, p);
2266 if (!error && (clp->lc_flags & LCL_NEEDSCBNULL) &&
2267 clp->lc_program) {
2268 /*
2269 * This happens on the first open for a client
2270 * that supports callbacks.
2271 */
2272 NFSUNLOCKSTATE();
2273 /*
2274 * Although nfsrv_docallback() will sleep, clp won't
2275		 * go away, since client structures are only removed when the
2276 * nfsv4_lock() has blocked the nfsd threads. The
2277 * fields in clp can change, but having multiple
2278 * threads do this Null callback RPC should be
2279 * harmless.
2280 */
2281 cbret = nfsrv_docallback(clp, NFSV4PROC_CBNULL,
2282 NULL, 0, NULL, NULL, NULL, p);
2283 NFSLOCKSTATE();
2284 clp->lc_flags &= ~LCL_NEEDSCBNULL;
2285 if (!cbret)
2286 clp->lc_flags |= LCL_CALLBACKSON;
2287 }
2288
2289 /*
2290 * Look up the open owner. See if it needs confirmation and
2291 * check the seq#, as required.
2292 */
2293 if (!error)
2294 nfsrv_getowner(&clp->lc_open, new_stp, &ownerstp);
2295
2296 if (error) {
2297 NFSUNLOCKSTATE();
2298 printf("Nfsd: openctrl unexpected state err=%d\n",
2299 error);
2300 free((caddr_t)new_lfp, M_NFSDLOCKFILE);
2301 free((caddr_t)new_open, M_NFSDSTATE);
2302 free((caddr_t)new_deleg, M_NFSDSTATE);
2303 if (haslock) {
2304 NFSLOCKV4ROOTMUTEX();
2305 nfsv4_unlock(&nfsv4rootfs_lock, 1);
2306 NFSUNLOCKV4ROOTMUTEX();
2307 }
2308 return (NFSERR_EXPIRED);
2309 }
2310
2311 if (new_stp->ls_flags & NFSLCK_RECLAIM)
2312 nfsrv_markstable(clp);
2313
2314 /*
2315 * Get the structure for the underlying file.
2316 */
2317 if (getfhret)
2318 error = getfhret;
2319 else
2320 error = nfsrv_getlockfile(new_stp->ls_flags, &new_lfp, &lfp,
2321 NULL, 0);
2322 if (new_lfp)
2323 FREE((caddr_t)new_lfp, M_NFSDLOCKFILE);
2324 if (error) {
2325 NFSUNLOCKSTATE();
2326 printf("Nfsd openctrl unexpected getlockfile err=%d\n",
2327 error);
2328 free((caddr_t)new_open, M_NFSDSTATE);
2329 free((caddr_t)new_deleg, M_NFSDSTATE);
2330 if (haslock) {
2331 NFSLOCKV4ROOTMUTEX();
2332 nfsv4_unlock(&nfsv4rootfs_lock, 1);
2333 NFSUNLOCKV4ROOTMUTEX();
2334 }
2335 return (error);
2336 }
2337
2338 /*
2339 * Search for a conflicting open/share.
2340 */
2341 if (new_stp->ls_flags & NFSLCK_DELEGCUR) {
2342 /*
2343 * For Delegate_Cur, search for the matching Delegation,
2344 * which indicates no conflict.
2345 * An old delegation should have been recovered by the
2346 * client doing a Claim_DELEGATE_Prev, so I won't let
2347	 * it match; NFSERR_EXPIRED is returned instead. Should I let it
2348 * match?
2349 */
2350 LIST_FOREACH(stp, &lfp->lf_deleg, ls_file) {
2351 if (!(stp->ls_flags & NFSLCK_OLDDELEG) &&
2352 stateidp->seqid == stp->ls_stateid.seqid &&
2353 !NFSBCMP(stateidp->other, stp->ls_stateid.other,
2354 NFSX_STATEIDOTHER))
2355 break;
2356 }
2357 if (stp == LIST_END(&lfp->lf_deleg) ||
2358 ((new_stp->ls_flags & NFSLCK_WRITEACCESS) &&
2359 (stp->ls_flags & NFSLCK_DELEGREAD))) {
2360 NFSUNLOCKSTATE();
2361 printf("Nfsd openctrl unexpected expiry\n");
2362 free((caddr_t)new_open, M_NFSDSTATE);
2363 free((caddr_t)new_deleg, M_NFSDSTATE);
2364 if (haslock) {
2365 NFSLOCKV4ROOTMUTEX();
2366 nfsv4_unlock(&nfsv4rootfs_lock, 1);
2367 NFSUNLOCKV4ROOTMUTEX();
2368 }
2369 return (NFSERR_EXPIRED);
2370 }
2371
2372 /*
2373	 * Don't issue a Delegation, since one already exists, and
2374	 * delay the delegation timeout, as required.
2375 */
2376 delegate = 0;
2377 nfsrv_delaydelegtimeout(stp);
2378 }
2379
2380 /*
2381 * Check for access/deny bit conflicts. I also check for the
2382 * same owner, since the client might not have bothered to check.
2383 * Also, note an open for the same file and owner, if found,
2384 * which is all we do here for Delegate_Cur, since conflict
2385 * checking is already done.
2386 */
2387 LIST_FOREACH(stp, &lfp->lf_open, ls_file) {
2388 if (ownerstp && stp->ls_openowner == ownerstp)
2389 openstp = stp;
2390 if (!(new_stp->ls_flags & NFSLCK_DELEGCUR)) {
2391 /*
2392 * If another client has the file open, the only
2393 * delegation that can be issued is a Read delegation
2394 * and only if it is a Read open with Deny none.
2395 */
2396 if (clp != stp->ls_clp) {
2397 if ((stp->ls_flags & NFSLCK_SHAREBITS) ==
2398 NFSLCK_READACCESS)
2399 writedeleg = 0;
2400 else
2401 delegate = 0;
2402 }
2403 if(((new_stp->ls_flags & NFSLCK_ACCESSBITS) &
2404 ((stp->ls_flags>>NFSLCK_SHIFT) & NFSLCK_ACCESSBITS))||
2405 ((stp->ls_flags & NFSLCK_ACCESSBITS) &
2406 ((new_stp->ls_flags>>NFSLCK_SHIFT)&NFSLCK_ACCESSBITS))){
2407 ret = nfsrv_clientconflict(stp->ls_clp,&haslock,vp,p);
2408 if (ret == 1) {
2409 /*
2410 * nfsrv_clientconflict() unlocks state
2411 * when it returns non-zero.
2412 */
2413 free((caddr_t)new_open, M_NFSDSTATE);
2414 free((caddr_t)new_deleg, M_NFSDSTATE);
2415 openstp = NULL;
2416 goto tryagain;
2417 }
2418 if (ret == 2)
2419 error = NFSERR_PERM;
2420 else if (new_stp->ls_flags & NFSLCK_RECLAIM)
2421 error = NFSERR_RECLAIMCONFLICT;
2422 else
2423 error = NFSERR_SHAREDENIED;
2424 if (ret == 0)
2425 NFSUNLOCKSTATE();
2426 if (haslock) {
2427 NFSLOCKV4ROOTMUTEX();
2428 nfsv4_unlock(&nfsv4rootfs_lock, 1);
2429 NFSUNLOCKV4ROOTMUTEX();
2430 }
2431 free((caddr_t)new_open, M_NFSDSTATE);
2432 free((caddr_t)new_deleg, M_NFSDSTATE);
2433 printf("nfsd openctrl unexpected client cnfl\n");
2434 return (error);
2435 }
2436 }
2437 }
2438
2439 /*
2440 * Check for a conflicting delegation. If one is found, call
2441 * nfsrv_delegconflict() to handle it. If the v4root lock hasn't
2442 * been set yet, it will get the lock. Otherwise, it will recall
2443	 * the delegation. Then, we try again...
2444 * (If NFSLCK_DELEGCUR is set, it has a delegation, so there
2445 * isn't a conflict.)
2446 * I currently believe the conflict algorithm to be:
2447 * For Open with Read Access and Deny None
2448 * - there is a conflict iff a different client has a write delegation
2449 * For Open with other Write Access or any Deny except None
2450 * - there is a conflict if a different client has any delegation
2451 * - there is a conflict if the same client has a read delegation
2452	 * (The current consensus is that this last case should be
2453 * considered a conflict since the client with a read delegation
2454 * could have done an Open with ReadAccess and WriteDeny
2455 * locally and then not have checked for the WriteDeny.)
2456 */
2457 if (!(new_stp->ls_flags & (NFSLCK_DELEGPREV | NFSLCK_DELEGCUR))) {
2458 stp = LIST_FIRST(&lfp->lf_deleg);
2459 while (stp != LIST_END(&lfp->lf_deleg)) {
2460 nstp = LIST_NEXT(stp, ls_file);
2461 if (stp->ls_clp != clp && (stp->ls_flags & NFSLCK_DELEGREAD))
2462 writedeleg = 0;
2463 else
2464 delegate = 0;
2465 if ((readonly && stp->ls_clp != clp &&
2466 (stp->ls_flags & NFSLCK_DELEGWRITE)) ||
2467 (!readonly && (stp->ls_clp != clp ||
2468 (stp->ls_flags & NFSLCK_DELEGREAD)))) {
2469 if (new_stp->ls_flags & NFSLCK_RECLAIM) {
2470 delegate = 2;
2471 } else {
2472 ret = nfsrv_delegconflict(stp, &haslock, p, vp);
2473 if (ret) {
2474 /*
2475 * nfsrv_delegconflict() unlocks state
2476 * when it returns non-zero.
2477 */
2478 printf("Nfsd openctrl unexpected deleg cnfl\n");
2479 free((caddr_t)new_open, M_NFSDSTATE);
2480 free((caddr_t)new_deleg, M_NFSDSTATE);
2481 if (ret == -1) {
2482 openstp = NULL;
2483 goto tryagain;
2484 }
2485 return (ret);
2486 }
2487 }
2488 }
2489 stp = nstp;
2490 }
2491 }
2492
2493 /*
2494 * We only get here if there was no open that conflicted.
2495	 * If an open for this owner already exists, update its access/deny
2496	 * bits; otherwise it is a new open. If the open_owner hasn't been
2497	 * confirmed, replace the open with the new one needing confirmation;
2498	 * otherwise add the open.
2499 */
2500 if (new_stp->ls_flags & NFSLCK_DELEGPREV) {
2501 /*
2502 * Handle NFSLCK_DELEGPREV by searching the old delegations for
2503 * a match. If found, just move the old delegation to the current
2504 * delegation list and issue open. If not found, return
2505 * NFSERR_EXPIRED.
2506 */
2507 LIST_FOREACH(stp, &clp->lc_olddeleg, ls_list) {
2508 if (stp->ls_lfp == lfp) {
2509 /* Found it */
2510 if (stp->ls_clp != clp)
2511 panic("olddeleg clp");
2512 LIST_REMOVE(stp, ls_list);
2513 LIST_REMOVE(stp, ls_hash);
2514 stp->ls_flags &= ~NFSLCK_OLDDELEG;
2515 stp->ls_stateid.seqid = delegstateidp->seqid = 0;
2516 stp->ls_stateid.other[0] = delegstateidp->other[0] =
2517 clp->lc_clientid.lval[0];
2518 stp->ls_stateid.other[1] = delegstateidp->other[1] =
2519 clp->lc_clientid.lval[1];
2520 stp->ls_stateid.other[2] = delegstateidp->other[2] =
2521 nfsrv_nextstateindex(clp);
2522 stp->ls_compref = nd->nd_compref;
2523 LIST_INSERT_HEAD(&clp->lc_deleg, stp, ls_list);
2524 LIST_INSERT_HEAD(NFSSTATEHASH(clp,
2525 stp->ls_stateid), stp, ls_hash);
2526 if (stp->ls_flags & NFSLCK_DELEGWRITE)
2527 *rflagsp |= NFSV4OPEN_WRITEDELEGATE;
2528 else
2529 *rflagsp |= NFSV4OPEN_READDELEGATE;
2530 clp->lc_delegtime = NFSD_MONOSEC +
2531 nfsrv_lease + NFSRV_LEASEDELTA;
2532
2533 /*
2534 * Now, do the associated open.
2535 */
2536 new_open->ls_stateid.seqid = 0;
2537 new_open->ls_stateid.other[0] = clp->lc_clientid.lval[0];
2538 new_open->ls_stateid.other[1] = clp->lc_clientid.lval[1];
2539 new_open->ls_stateid.other[2] = nfsrv_nextstateindex(clp);
2540 new_open->ls_flags = (new_stp->ls_flags&NFSLCK_DENYBITS)|
2541 NFSLCK_OPEN;
2542 if (stp->ls_flags & NFSLCK_DELEGWRITE)
2543 new_open->ls_flags |= (NFSLCK_READACCESS |
2544 NFSLCK_WRITEACCESS);
2545 else
2546 new_open->ls_flags |= NFSLCK_READACCESS;
2547 new_open->ls_uid = new_stp->ls_uid;
2548 new_open->ls_lfp = lfp;
2549 new_open->ls_clp = clp;
2550 LIST_INIT(&new_open->ls_open);
2551 LIST_INSERT_HEAD(&lfp->lf_open, new_open, ls_file);
2552 LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_open->ls_stateid),
2553 new_open, ls_hash);
2554 /*
2555 * and handle the open owner
2556 */
2557 if (ownerstp) {
2558 new_open->ls_openowner = ownerstp;
2559 LIST_INSERT_HEAD(&ownerstp->ls_open,new_open,ls_list);
2560 } else {
2561 new_open->ls_openowner = new_stp;
2562 new_stp->ls_flags = 0;
2563 nfsrvd_refcache(new_stp->ls_op);
2564 new_stp->ls_noopens = 0;
2565 LIST_INIT(&new_stp->ls_open);
2566 LIST_INSERT_HEAD(&new_stp->ls_open, new_open, ls_list);
2567 LIST_INSERT_HEAD(&clp->lc_open, new_stp, ls_list);
2568 *new_stpp = NULL;
2569 newnfsstats.srvopenowners++;
2570 nfsrv_openpluslock++;
2571 }
2572 openstp = new_open;
2573 new_open = NULL;
2574 newnfsstats.srvopens++;
2575 nfsrv_openpluslock++;
2576 break;
2577 }
2578 }
2579 if (stp == LIST_END(&clp->lc_olddeleg))
2580 error = NFSERR_EXPIRED;
2581 } else if (new_stp->ls_flags & (NFSLCK_DELEGREAD | NFSLCK_DELEGWRITE)) {
2582 /*
2583	 * Scan to check that no delegation for this client and file
2584	 * already exists.
2585 * There also shouldn't yet be an Open for this file and
2586 * openowner.
2587 */
2588 LIST_FOREACH(stp, &lfp->lf_deleg, ls_file) {
2589 if (stp->ls_clp == clp)
2590 break;
2591 }
2592 if (stp == LIST_END(&lfp->lf_deleg) && openstp == NULL) {
2593 /*
2594 * This is the Claim_Previous case with a delegation
2595 * type != Delegate_None.
2596 */
2597 /*
2598 * First, add the delegation. (Although we must issue the
2599 * delegation, we can also ask for an immediate return.)
2600 */
2601 new_deleg->ls_stateid.seqid = delegstateidp->seqid = 0;
2602 new_deleg->ls_stateid.other[0] = delegstateidp->other[0] =
2603 clp->lc_clientid.lval[0];
2604 new_deleg->ls_stateid.other[1] = delegstateidp->other[1] =
2605 clp->lc_clientid.lval[1];
2606 new_deleg->ls_stateid.other[2] = delegstateidp->other[2] =
2607 nfsrv_nextstateindex(clp);
2608 if (new_stp->ls_flags & NFSLCK_DELEGWRITE) {
2609 new_deleg->ls_flags = (NFSLCK_DELEGWRITE |
2610 NFSLCK_READACCESS | NFSLCK_WRITEACCESS);
2611 *rflagsp |= NFSV4OPEN_WRITEDELEGATE;
2612 } else {
2613 new_deleg->ls_flags = (NFSLCK_DELEGREAD |
2614 NFSLCK_READACCESS);
2615 *rflagsp |= NFSV4OPEN_READDELEGATE;
2616 }
2617 new_deleg->ls_uid = new_stp->ls_uid;
2618 new_deleg->ls_lfp = lfp;
2619 new_deleg->ls_clp = clp;
2620 new_deleg->ls_filerev = filerev;
2621 new_deleg->ls_compref = nd->nd_compref;
2622 LIST_INSERT_HEAD(&lfp->lf_deleg, new_deleg, ls_file);
2623 LIST_INSERT_HEAD(NFSSTATEHASH(clp,
2624 new_deleg->ls_stateid), new_deleg, ls_hash);
2625 LIST_INSERT_HEAD(&clp->lc_deleg, new_deleg, ls_list);
2626 new_deleg = NULL;
2627 if (delegate == 2 || nfsrv_issuedelegs == 0 ||
2628 (clp->lc_flags & (LCL_CALLBACKSON | LCL_CBDOWN)) !=
2629 LCL_CALLBACKSON ||
2630 NFSRV_V4DELEGLIMIT(nfsrv_delegatecnt) ||
2631 !NFSVNO_DELEGOK(vp))
2632 *rflagsp |= NFSV4OPEN_RECALL;
2633 newnfsstats.srvdelegates++;
2634 nfsrv_openpluslock++;
2635 nfsrv_delegatecnt++;
2636
2637 /*
2638 * Now, do the associated open.
2639 */
2640 new_open->ls_stateid.seqid = 0;
2641 new_open->ls_stateid.other[0] = clp->lc_clientid.lval[0];
2642 new_open->ls_stateid.other[1] = clp->lc_clientid.lval[1];
2643 new_open->ls_stateid.other[2] = nfsrv_nextstateindex(clp);
2644 new_open->ls_flags = (new_stp->ls_flags & NFSLCK_DENYBITS) |
2645 NFSLCK_OPEN;
2646 if (new_stp->ls_flags & NFSLCK_DELEGWRITE)
2647 new_open->ls_flags |= (NFSLCK_READACCESS |
2648 NFSLCK_WRITEACCESS);
2649 else
2650 new_open->ls_flags |= NFSLCK_READACCESS;
2651 new_open->ls_uid = new_stp->ls_uid;
2652 new_open->ls_lfp = lfp;
2653 new_open->ls_clp = clp;
2654 LIST_INIT(&new_open->ls_open);
2655 LIST_INSERT_HEAD(&lfp->lf_open, new_open, ls_file);
2656 LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_open->ls_stateid),
2657 new_open, ls_hash);
2658 /*
2659 * and handle the open owner
2660 */
2661 if (ownerstp) {
2662 new_open->ls_openowner = ownerstp;
2663 LIST_INSERT_HEAD(&ownerstp->ls_open, new_open, ls_list);
2664 } else {
2665 new_open->ls_openowner = new_stp;
2666 new_stp->ls_flags = 0;
2667 nfsrvd_refcache(new_stp->ls_op);
2668 new_stp->ls_noopens = 0;
2669 LIST_INIT(&new_stp->ls_open);
2670 LIST_INSERT_HEAD(&new_stp->ls_open, new_open, ls_list);
2671 LIST_INSERT_HEAD(&clp->lc_open, new_stp, ls_list);
2672 *new_stpp = NULL;
2673 newnfsstats.srvopenowners++;
2674 nfsrv_openpluslock++;
2675 }
2676 openstp = new_open;
2677 new_open = NULL;
2678 newnfsstats.srvopens++;
2679 nfsrv_openpluslock++;
2680 } else {
2681 error = NFSERR_RECLAIMCONFLICT;
2682 }
2683 } else if (ownerstp) {
2684 if (ownerstp->ls_flags & NFSLCK_NEEDSCONFIRM) {
2685 /* Replace the open */
2686 if (ownerstp->ls_op)
2687 nfsrvd_derefcache(ownerstp->ls_op);
2688 ownerstp->ls_op = new_stp->ls_op;
2689 nfsrvd_refcache(ownerstp->ls_op);
2690 ownerstp->ls_seq = new_stp->ls_seq;
2691 *rflagsp |= NFSV4OPEN_RESULTCONFIRM;
2692 stp = LIST_FIRST(&ownerstp->ls_open);
2693 stp->ls_flags = (new_stp->ls_flags & NFSLCK_SHAREBITS) |
2694 NFSLCK_OPEN;
2695 stp->ls_stateid.seqid = 0;
2696 stp->ls_uid = new_stp->ls_uid;
2697 if (lfp != stp->ls_lfp) {
2698 LIST_REMOVE(stp, ls_file);
2699 LIST_INSERT_HEAD(&lfp->lf_open, stp, ls_file);
2700 stp->ls_lfp = lfp;
2701 }
2702 openstp = stp;
2703 } else if (openstp) {
2704 openstp->ls_flags |= (new_stp->ls_flags & NFSLCK_SHAREBITS);
2705 openstp->ls_stateid.seqid++;
2706
2707 /*
2708 * This is where we can choose to issue a delegation.
2709 */
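			/*
			 * Roughly: a write delegation is handed out here only
			 * when delegations are enabled, no conflicting open or
			 * delegation was noted for another client, the export
			 * isn't read-only, a write delegation is allowed for a
			 * read-only open only if nfsrv_writedelegifpos is set,
			 * the client's callback path is confirmed working and
			 * the server-wide delegation limit hasn't been reached.
			 */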
2710 if (delegate && nfsrv_issuedelegs &&
2711 writedeleg && !NFSVNO_EXRDONLY(exp) &&
2712 (nfsrv_writedelegifpos || !readonly) &&
2713 (clp->lc_flags & (LCL_CALLBACKSON | LCL_CBDOWN)) ==
2714 LCL_CALLBACKSON &&
2715 !NFSRV_V4DELEGLIMIT(nfsrv_delegatecnt) &&
2716 NFSVNO_DELEGOK(vp)) {
2717 new_deleg->ls_stateid.seqid = delegstateidp->seqid = 0;
2718 new_deleg->ls_stateid.other[0] = delegstateidp->other[0]
2719 = clp->lc_clientid.lval[0];
2720 new_deleg->ls_stateid.other[1] = delegstateidp->other[1]
2721 = clp->lc_clientid.lval[1];
2722 new_deleg->ls_stateid.other[2] = delegstateidp->other[2]
2723 = nfsrv_nextstateindex(clp);
2724 new_deleg->ls_flags = (NFSLCK_DELEGWRITE |
2725 NFSLCK_READACCESS | NFSLCK_WRITEACCESS);
2726 *rflagsp |= NFSV4OPEN_WRITEDELEGATE;
2727 new_deleg->ls_uid = new_stp->ls_uid;
2728 new_deleg->ls_lfp = lfp;
2729 new_deleg->ls_clp = clp;
2730 new_deleg->ls_filerev = filerev;
2731 new_deleg->ls_compref = nd->nd_compref;
2732 LIST_INSERT_HEAD(&lfp->lf_deleg, new_deleg, ls_file);
2733 LIST_INSERT_HEAD(NFSSTATEHASH(clp,
2734 new_deleg->ls_stateid), new_deleg, ls_hash);
2735 LIST_INSERT_HEAD(&clp->lc_deleg, new_deleg, ls_list);
2736 new_deleg = NULL;
2737 newnfsstats.srvdelegates++;
2738 nfsrv_openpluslock++;
2739 nfsrv_delegatecnt++;
2740 }
2741 } else {
2742 new_open->ls_stateid.seqid = 0;
2743 new_open->ls_stateid.other[0] = clp->lc_clientid.lval[0];
2744 new_open->ls_stateid.other[1] = clp->lc_clientid.lval[1];
2745 new_open->ls_stateid.other[2] = nfsrv_nextstateindex(clp);
2746 new_open->ls_flags = (new_stp->ls_flags & NFSLCK_SHAREBITS)|
2747 NFSLCK_OPEN;
2748 new_open->ls_uid = new_stp->ls_uid;
2749 new_open->ls_openowner = ownerstp;
2750 new_open->ls_lfp = lfp;
2751 new_open->ls_clp = clp;
2752 LIST_INIT(&new_open->ls_open);
2753 LIST_INSERT_HEAD(&lfp->lf_open, new_open, ls_file);
2754 LIST_INSERT_HEAD(&ownerstp->ls_open, new_open, ls_list);
2755 LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_open->ls_stateid),
2756 new_open, ls_hash);
2757 openstp = new_open;
2758 new_open = NULL;
2759 newnfsstats.srvopens++;
2760 nfsrv_openpluslock++;
2761
2762 /*
2763 * This is where we can choose to issue a delegation.
2764 */
2765 if (delegate && nfsrv_issuedelegs &&
2766 (writedeleg || readonly) &&
2767 (clp->lc_flags & (LCL_CALLBACKSON | LCL_CBDOWN)) ==
2768 LCL_CALLBACKSON &&
2769 !NFSRV_V4DELEGLIMIT(nfsrv_delegatecnt) &&
2770 NFSVNO_DELEGOK(vp)) {
2771 new_deleg->ls_stateid.seqid = delegstateidp->seqid = 0;
2772 new_deleg->ls_stateid.other[0] = delegstateidp->other[0]
2773 = clp->lc_clientid.lval[0];
2774 new_deleg->ls_stateid.other[1] = delegstateidp->other[1]
2775 = clp->lc_clientid.lval[1];
2776 new_deleg->ls_stateid.other[2] = delegstateidp->other[2]
2777 = nfsrv_nextstateindex(clp);
2778 if (writedeleg && !NFSVNO_EXRDONLY(exp) &&
2779 (nfsrv_writedelegifpos || !readonly)) {
2780 new_deleg->ls_flags = (NFSLCK_DELEGWRITE |
2781 NFSLCK_READACCESS | NFSLCK_WRITEACCESS);
2782 *rflagsp |= NFSV4OPEN_WRITEDELEGATE;
2783 } else {
2784 new_deleg->ls_flags = (NFSLCK_DELEGREAD |
2785 NFSLCK_READACCESS);
2786 *rflagsp |= NFSV4OPEN_READDELEGATE;
2787 }
2788 new_deleg->ls_uid = new_stp->ls_uid;
2789 new_deleg->ls_lfp = lfp;
2790 new_deleg->ls_clp = clp;
2791 new_deleg->ls_filerev = filerev;
2792 new_deleg->ls_compref = nd->nd_compref;
2793 LIST_INSERT_HEAD(&lfp->lf_deleg, new_deleg, ls_file);
2794 LIST_INSERT_HEAD(NFSSTATEHASH(clp,
2795 new_deleg->ls_stateid), new_deleg, ls_hash);
2796 LIST_INSERT_HEAD(&clp->lc_deleg, new_deleg, ls_list);
2797 new_deleg = NULL;
2798 newnfsstats.srvdelegates++;
2799 nfsrv_openpluslock++;
2800 nfsrv_delegatecnt++;
2801 }
2802 }
2803 } else {
2804 /*
2805 * New owner case. Start the open_owner sequence with a
2806 * Needs confirmation (unless a reclaim) and hang the
2807 * new open off it.
2808 */
2809 new_open->ls_stateid.seqid = 0;
2810 new_open->ls_stateid.other[0] = clp->lc_clientid.lval[0];
2811 new_open->ls_stateid.other[1] = clp->lc_clientid.lval[1];
2812 new_open->ls_stateid.other[2] = nfsrv_nextstateindex(clp);
2813 new_open->ls_flags = (new_stp->ls_flags & NFSLCK_SHAREBITS) |
2814 NFSLCK_OPEN;
2815 new_open->ls_uid = new_stp->ls_uid;
2816 LIST_INIT(&new_open->ls_open);
2817 new_open->ls_openowner = new_stp;
2818 new_open->ls_lfp = lfp;
2819 new_open->ls_clp = clp;
2820 LIST_INSERT_HEAD(&lfp->lf_open, new_open, ls_file);
2821 if (new_stp->ls_flags & NFSLCK_RECLAIM) {
2822 new_stp->ls_flags = 0;
2823 } else {
2824 *rflagsp |= NFSV4OPEN_RESULTCONFIRM;
2825 new_stp->ls_flags = NFSLCK_NEEDSCONFIRM;
2826 }
2827 nfsrvd_refcache(new_stp->ls_op);
2828 new_stp->ls_noopens = 0;
2829 LIST_INIT(&new_stp->ls_open);
2830 LIST_INSERT_HEAD(&new_stp->ls_open, new_open, ls_list);
2831 LIST_INSERT_HEAD(&clp->lc_open, new_stp, ls_list);
2832 LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_open->ls_stateid),
2833 new_open, ls_hash);
2834 openstp = new_open;
2835 new_open = NULL;
2836 *new_stpp = NULL;
2837 newnfsstats.srvopens++;
2838 nfsrv_openpluslock++;
2839 newnfsstats.srvopenowners++;
2840 nfsrv_openpluslock++;
2841 }
2842 if (!error) {
2843 stateidp->seqid = openstp->ls_stateid.seqid;
2844 stateidp->other[0] = openstp->ls_stateid.other[0];
2845 stateidp->other[1] = openstp->ls_stateid.other[1];
2846 stateidp->other[2] = openstp->ls_stateid.other[2];
2847 }
2848 NFSUNLOCKSTATE();
2849 if (haslock) {
2850 NFSLOCKV4ROOTMUTEX();
2851 nfsv4_unlock(&nfsv4rootfs_lock, 1);
2852 NFSUNLOCKV4ROOTMUTEX();
2853 }
2854 if (new_open)
2855 FREE((caddr_t)new_open, M_NFSDSTATE);
2856 if (new_deleg)
2857 FREE((caddr_t)new_deleg, M_NFSDSTATE);
2858 return (error);
2859}
2860
2861/*
2862 * Open update. Does the confirm, downgrade and close.
2863 */
2864APPLESTATIC int
2865nfsrv_openupdate(vnode_t vp, struct nfsstate *new_stp, nfsquad_t clientid,
2866 nfsv4stateid_t *stateidp, struct nfsrv_descript *nd, NFSPROC_T *p)
2867{
2868 struct nfsstate *stp, *ownerstp;
2869 struct nfsclient *clp;
2870 struct nfslockfile *lfp;
2871 u_int32_t bits;
2872 int error, gotstate = 0, len = 0;
2873 u_char client[NFSV4_OPAQUELIMIT];
2874
2875 /*
2876 * Check for restart conditions (client and server).
2877 */
2878 error = nfsrv_checkrestart(clientid, new_stp->ls_flags,
2879 &new_stp->ls_stateid, 0);
2880 if (error)
2881 return (error);
2882
2883 NFSLOCKSTATE();
2884 /*
2885 * Get the open structure via clientid and stateid.
2886 */
2887 error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp,
2888 (nfsquad_t)((u_quad_t)0), NULL, p);
2889 if (!error)
2890 error = nfsrv_getstate(clp, &new_stp->ls_stateid,
2891 new_stp->ls_flags, &stp);
2892
2893 /*
2894 * Sanity check the open.
2895 */
2896 if (!error && (!(stp->ls_flags & NFSLCK_OPEN) ||
2897 (!(new_stp->ls_flags & NFSLCK_CONFIRM) &&
2898 (stp->ls_openowner->ls_flags & NFSLCK_NEEDSCONFIRM)) ||
2899 ((new_stp->ls_flags & NFSLCK_CONFIRM) &&
2900 (!(stp->ls_openowner->ls_flags & NFSLCK_NEEDSCONFIRM)))))
2901 error = NFSERR_BADSTATEID;
2902
2903 if (!error)
2904 error = nfsrv_checkseqid(nd, new_stp->ls_seq,
2905 stp->ls_openowner, new_stp->ls_op);
2906 if (!error && stp->ls_stateid.seqid != new_stp->ls_stateid.seqid &&
2907 !(new_stp->ls_flags & NFSLCK_CONFIRM))
2908 error = NFSERR_OLDSTATEID;
2909 if (!error && vnode_vtype(vp) != VREG) {
2910 if (vnode_vtype(vp) == VDIR)
2911 error = NFSERR_ISDIR;
2912 else
2913 error = NFSERR_INVAL;
2914 }
2915
2916 if (error) {
2917 /*
2918 * If a client tries to confirm an Open with a bad
2919 * seqid# and there are no byte range locks or other Opens
2920 * on the openowner, just throw it away, so the next use of the
2921 * openowner will start a fresh seq#.
2922 */
2923 if (error == NFSERR_BADSEQID &&
2924 (new_stp->ls_flags & NFSLCK_CONFIRM) &&
2925 nfsrv_nootherstate(stp))
2926 nfsrv_freeopenowner(stp->ls_openowner, 0, p);
2927 NFSUNLOCKSTATE();
2928 return (error);
2929 }
2930
2931 /*
2932 * Set the return stateid.
2933 */
2934 stateidp->seqid = stp->ls_stateid.seqid + 1;
2935 stateidp->other[0] = stp->ls_stateid.other[0];
2936 stateidp->other[1] = stp->ls_stateid.other[1];
2937 stateidp->other[2] = stp->ls_stateid.other[2];
2938 /*
2939	 * Now, handle the three cases: confirm, close and downgrade.
2940 */
2941 if (new_stp->ls_flags & NFSLCK_CONFIRM) {
2942 /*
2943 * If the open doesn't need confirmation, it seems to me that
2944 * there is a client error, but I'll just log it and keep going?
2945 */
2946 if (!(stp->ls_openowner->ls_flags & NFSLCK_NEEDSCONFIRM))
2947 printf("Nfsv4d: stray open confirm\n");
2948 stp->ls_openowner->ls_flags = 0;
2949 stp->ls_stateid.seqid++;
2950 if (!(clp->lc_flags & LCL_STAMPEDSTABLE)) {
2951 clp->lc_flags |= LCL_STAMPEDSTABLE;
2952 len = clp->lc_idlen;
2953 NFSBCOPY(clp->lc_id, client, len);
2954 gotstate = 1;
2955 }
2956 NFSUNLOCKSTATE();
2957 } else if (new_stp->ls_flags & NFSLCK_CLOSE) {
2958 ownerstp = stp->ls_openowner;
2959 lfp = stp->ls_lfp;
2960 if (nfsrv_dolocallocks != 0 && !LIST_EMPTY(&stp->ls_open)) {
2961 /* Get the lf lock */
2962 nfsrv_locklf(lfp);
2963 NFSUNLOCKSTATE();
2964 if (nfsrv_freeopen(stp, vp, 1, p) == 0) {
2965 NFSLOCKSTATE();
2966 nfsrv_unlocklf(lfp);
2967 NFSUNLOCKSTATE();
2968 }
2969 } else {
2970 (void) nfsrv_freeopen(stp, NULL, 0, p);
2971 NFSUNLOCKSTATE();
2972 }
2973 } else {
2974 /*
2975	 * Update the share bits, making sure that the new set is a
2976 * subset of the old ones.
2977 */
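		/*
		 * For example, downgrading an open from Read+Write access to
		 * Read access is allowed, but a "downgrade" that tries to add
		 * Write access to a Read-only open fails the subset test
		 * below and returns NFSERR_INVAL.
		 */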
2978 bits = (new_stp->ls_flags & NFSLCK_SHAREBITS);
2979 if (~(stp->ls_flags) & bits) {
2980 NFSUNLOCKSTATE();
2981 return (NFSERR_INVAL);
2982 }
2983 stp->ls_flags = (bits | NFSLCK_OPEN);
2984 stp->ls_stateid.seqid++;
2985 NFSUNLOCKSTATE();
2986 }
2987
2988 /*
2989 * If the client just confirmed its first open, write a timestamp
2990 * to the stable storage file.
2991 */
2992 if (gotstate != 0) {
2993 nfsrv_writestable(client, len, NFSNST_NEWSTATE, p);
2994 nfsrv_backupstable();
2995 }
2996 return (error);
2997}
2998
2999/*
3000 * Delegation update. Does the purge and return.
3001 */
3002APPLESTATIC int
3003nfsrv_delegupdate(nfsquad_t clientid, nfsv4stateid_t *stateidp,
3004 vnode_t vp, int op, struct ucred *cred, NFSPROC_T *p)
3005{
3006 struct nfsstate *stp;
3007 struct nfsclient *clp;
3008 int error;
3009 fhandle_t fh;
3010
3011 /*
3012 * Do a sanity check against the file handle for DelegReturn.
3013 */
3014 if (vp) {
3015 error = nfsvno_getfh(vp, &fh, p);
3016 if (error)
3017 return (error);
3018 }
3019 /*
3020 * Check for restart conditions (client and server).
3021 */
3022 if (op == NFSV4OP_DELEGRETURN)
3023 error = nfsrv_checkrestart(clientid, NFSLCK_DELEGRETURN,
3024 stateidp, 0);
3025 else
3026 error = nfsrv_checkrestart(clientid, NFSLCK_DELEGPURGE,
3027 stateidp, 0);
3028
3029 NFSLOCKSTATE();
3030 /*
3031 * Get the open structure via clientid and stateid.
3032 */
3033 if (!error)
3034 error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp,
3035 (nfsquad_t)((u_quad_t)0), NULL, p);
3036 if (error) {
3037 if (error == NFSERR_CBPATHDOWN)
3038 error = 0;
3039 if (error == NFSERR_STALECLIENTID && op == NFSV4OP_DELEGRETURN)
3040 error = NFSERR_STALESTATEID;
3041 }
3042 if (!error && op == NFSV4OP_DELEGRETURN) {
3043 error = nfsrv_getstate(clp, stateidp, NFSLCK_DELEGRETURN, &stp);
3044 if (!error && stp->ls_stateid.seqid != stateidp->seqid)
3045 error = NFSERR_OLDSTATEID;
3046 }
3047 /*
3048 * NFSERR_EXPIRED means that the state has gone away,
3049 * so Delegations have been purged. Just return ok.
3050 */
3051 if (error == NFSERR_EXPIRED && op == NFSV4OP_DELEGPURGE) {
3052 NFSUNLOCKSTATE();
3053 return (0);
3054 }
3055 if (error) {
3056 NFSUNLOCKSTATE();
3057 return (error);
3058 }
3059
3060 if (op == NFSV4OP_DELEGRETURN) {
3061 if (NFSBCMP((caddr_t)&fh, (caddr_t)&stp->ls_lfp->lf_fh,
3062 sizeof (fhandle_t))) {
3063 NFSUNLOCKSTATE();
3064 return (NFSERR_BADSTATEID);
3065 }
3066 nfsrv_freedeleg(stp);
3067 } else {
3068 nfsrv_freedeleglist(&clp->lc_olddeleg);
3069 }
3070 NFSUNLOCKSTATE();
3071 return (0);
3072}
3073
3074/*
3075 * Release lock owner.
3076 */
3077APPLESTATIC int
3078nfsrv_releaselckown(struct nfsstate *new_stp, nfsquad_t clientid,
3079 NFSPROC_T *p)
3080{
3081 struct nfsstate *stp, *nstp, *openstp, *ownstp;
3082 struct nfsclient *clp;
3083 int error;
3084
3085 /*
3086 * Check for restart conditions (client and server).
3087 */
3088 error = nfsrv_checkrestart(clientid, new_stp->ls_flags,
3089 &new_stp->ls_stateid, 0);
3090 if (error)
3091 return (error);
3092
3093 NFSLOCKSTATE();
3094 /*
3095 * Get the lock owner by name.
3096 */
3097 error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp,
3098 (nfsquad_t)((u_quad_t)0), NULL, p);
3099 if (error) {
3100 NFSUNLOCKSTATE();
3101 return (error);
3102 }
3103 LIST_FOREACH(ownstp, &clp->lc_open, ls_list) {
3104 LIST_FOREACH(openstp, &ownstp->ls_open, ls_list) {
3105 stp = LIST_FIRST(&openstp->ls_open);
3106 while (stp != LIST_END(&openstp->ls_open)) {
3107 nstp = LIST_NEXT(stp, ls_list);
3108 /*
3109 * If the owner matches, check for locks and
3110 * then free or return an error.
3111 */
3112 if (stp->ls_ownerlen == new_stp->ls_ownerlen &&
3113 !NFSBCMP(stp->ls_owner, new_stp->ls_owner,
3114 stp->ls_ownerlen)){
3115 if (LIST_EMPTY(&stp->ls_lock)) {
3116 nfsrv_freelockowner(stp, NULL, 0, p);
3117 } else {
3118 NFSUNLOCKSTATE();
3119 return (NFSERR_LOCKSHELD);
3120 }
3121 }
3122 stp = nstp;
3123 }
3124 }
3125 }
3126 NFSUNLOCKSTATE();
3127 return (0);
3128}
3129
3130/*
3131 * Get the file handle for a lock structure.
3132 */
3133static int
3134nfsrv_getlockfh(vnode_t vp, u_short flags,
3135 struct nfslockfile **new_lfpp, fhandle_t *nfhp, NFSPROC_T *p)
3136{
3137 fhandle_t *fhp = NULL;
3138 struct nfslockfile *new_lfp;
3139 int error;
3140
3141 /*
3142 * For lock, use the new nfslock structure, otherwise just
3143 * a fhandle_t on the stack.
3144 */
3145 if (flags & NFSLCK_OPEN) {
3146 new_lfp = *new_lfpp;
3147 fhp = &new_lfp->lf_fh;
3148 } else if (nfhp) {
3149 fhp = nfhp;
3150 } else {
3151 panic("nfsrv_getlockfh");
3152 }
3153 error = nfsvno_getfh(vp, fhp, p);
3154 return (error);
3155}
3156
3157/*
3158 * Get an nfslockfile structure. Allocate one, as required, and return a
3159 * pointer to it.
3160 * Returns an NFSERR_xxx upon failure or -1 to indicate no current lock.
3161 */
3162static int
3163nfsrv_getlockfile(u_short flags, struct nfslockfile **new_lfpp,
3164 struct nfslockfile **lfpp, fhandle_t *nfhp, int lockit)
3165{
3166 struct nfslockfile *lfp;
3167 fhandle_t *fhp = NULL, *tfhp;
3168 struct nfslockhashhead *hp;
3169 struct nfslockfile *new_lfp = NULL;
3170
3171 /*
3172 * For lock, use the new nfslock structure, otherwise just
3173 * a fhandle_t on the stack.
3174 */
3175 if (flags & NFSLCK_OPEN) {
3176 new_lfp = *new_lfpp;
3177 fhp = &new_lfp->lf_fh;
3178 } else if (nfhp) {
3179 fhp = nfhp;
3180 } else {
3181 panic("nfsrv_getlockfile");
3182 }
3183
3184 hp = NFSLOCKHASH(fhp);
3185 LIST_FOREACH(lfp, hp, lf_hash) {
3186 tfhp = &lfp->lf_fh;
3187 if (NFSVNO_CMPFH(fhp, tfhp)) {
3188 if (lockit)
3189 nfsrv_locklf(lfp);
3190 *lfpp = lfp;
3191 return (0);
3192 }
3193 }
3194 if (!(flags & NFSLCK_OPEN))
3195 return (-1);
3196
3197 /*
3198 * No match, so chain the new one into the list.
3199 */
3200 LIST_INIT(&new_lfp->lf_open);
3201 LIST_INIT(&new_lfp->lf_lock);
3202 LIST_INIT(&new_lfp->lf_deleg);
3203 LIST_INIT(&new_lfp->lf_locallock);
3204 LIST_INIT(&new_lfp->lf_rollback);
3205 new_lfp->lf_locallock_lck.nfslock_usecnt = 0;
3206 new_lfp->lf_locallock_lck.nfslock_lock = 0;
3207 new_lfp->lf_usecount = 0;
3208 LIST_INSERT_HEAD(hp, new_lfp, lf_hash);
3209 *lfpp = new_lfp;
3210 *new_lfpp = NULL;
3211 return (0);
3212}
3213
3214/*
3215 * This function adds an nfslock lock structure to the list for the associated
3216 * nfsstate and nfslockfile structures. It will be inserted after the
3217 * entry pointed at by insert_lop.
3218 */
3219static void
3220nfsrv_insertlock(struct nfslock *new_lop, struct nfslock *insert_lop,
3221 struct nfsstate *stp, struct nfslockfile *lfp)
3222{
3223 struct nfslock *lop, *nlop;
3224
3225 new_lop->lo_stp = stp;
3226 new_lop->lo_lfp = lfp;
3227
3228 if (stp != NULL) {
3229 /* Insert in increasing lo_first order */
3230 lop = LIST_FIRST(&lfp->lf_lock);
3231 if (lop == LIST_END(&lfp->lf_lock) ||
3232 new_lop->lo_first <= lop->lo_first) {
3233 LIST_INSERT_HEAD(&lfp->lf_lock, new_lop, lo_lckfile);
3234 } else {
3235 nlop = LIST_NEXT(lop, lo_lckfile);
3236 while (nlop != LIST_END(&lfp->lf_lock) &&
3237 nlop->lo_first < new_lop->lo_first) {
3238 lop = nlop;
3239 nlop = LIST_NEXT(lop, lo_lckfile);
3240 }
3241 LIST_INSERT_AFTER(lop, new_lop, lo_lckfile);
3242 }
3243 } else {
3244 new_lop->lo_lckfile.le_prev = NULL; /* list not used */
3245 }
3246
3247 /*
3248 * Insert after insert_lop, which is overloaded as stp or lfp for
3249 * an empty list.
3250 */
3251 if (stp == NULL && (struct nfslockfile *)insert_lop == lfp)
3252 LIST_INSERT_HEAD(&lfp->lf_locallock, new_lop, lo_lckowner);
3253 else if ((struct nfsstate *)insert_lop == stp)
3254 LIST_INSERT_HEAD(&stp->ls_lock, new_lop, lo_lckowner);
3255 else
3256 LIST_INSERT_AFTER(insert_lop, new_lop, lo_lckowner);
3257 if (stp != NULL) {
3258 newnfsstats.srvlocks++;
3259 nfsrv_openpluslock++;
3260 }
3261}
3262
3263/*
3264 * This function updates the locking for a lock owner and given file. It
3265 * maintains a list of lock ranges ordered on increasing file offset that
3266 * are NFSLCK_READ or NFSLCK_WRITE and non-overlapping (aka POSIX style).
3267 * It always adds new_lop to the list and sometimes uses the one pointed
3268 * at by other_lopp.
3269 */
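/*
 * For example, given a single lock owner holding a write lock on bytes
 * 10-99, an unlock of bytes 40-59 splits it: the original entry is trimmed
 * to 10-39 and *other_lopp is consumed to describe 60-99.  A new read lock
 * on bytes 0-19 over that same write lock instead trims the old lock to
 * start at byte 20 and inserts the read lock in front of it.
 */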
3270static void
3271nfsrv_updatelock(struct nfsstate *stp, struct nfslock **new_lopp,
3272 struct nfslock **other_lopp, struct nfslockfile *lfp)
3273{
3274 struct nfslock *new_lop = *new_lopp;
3275 struct nfslock *lop, *tlop, *ilop;
3276 struct nfslock *other_lop = *other_lopp;
3277 int unlock = 0, myfile = 0;
3278 u_int64_t tmp;
3279
3280 /*
3281 * Work down the list until the lock is merged.
3282 */
3283 if (new_lop->lo_flags & NFSLCK_UNLOCK)
3284 unlock = 1;
3285 if (stp != NULL) {
3286 ilop = (struct nfslock *)stp;
3287 lop = LIST_FIRST(&stp->ls_lock);
3288 } else {
3289 ilop = (struct nfslock *)lfp;
3290 lop = LIST_FIRST(&lfp->lf_locallock);
3291 }
3292 while (lop != NULL) {
3293 /*
3294 * Only check locks for this file that aren't before the start of
3295	     * the new lock's range.
3296 */
3297 if (lop->lo_lfp == lfp) {
3298 myfile = 1;
3299 if (lop->lo_end >= new_lop->lo_first) {
3300 if (new_lop->lo_end < lop->lo_first) {
3301 /*
3302 * If the new lock ends before the start of the
3303 * current lock's range, no merge, just insert
3304 * the new lock.
3305 */
3306 break;
3307 }
3308 if (new_lop->lo_flags == lop->lo_flags ||
3309 (new_lop->lo_first <= lop->lo_first &&
3310 new_lop->lo_end >= lop->lo_end)) {
3311 /*
3312 * This lock can be absorbed by the new lock/unlock.
3313 * This happens when it covers the entire range
3314 * of the old lock or is contiguous
3315 * with the old lock and is of the same type or an
3316 * unlock.
3317 */
3318 if (lop->lo_first < new_lop->lo_first)
3319 new_lop->lo_first = lop->lo_first;
3320 if (lop->lo_end > new_lop->lo_end)
3321 new_lop->lo_end = lop->lo_end;
3322 tlop = lop;
3323 lop = LIST_NEXT(lop, lo_lckowner);
3324 nfsrv_freenfslock(tlop);
3325 continue;
3326 }
3327
3328 /*
3329 * All these cases are for contiguous locks that are not the
3330 * same type, so they can't be merged.
3331 */
3332 if (new_lop->lo_first <= lop->lo_first) {
3333 /*
3334 * This case is where the new lock overlaps with the
3335 * first part of the old lock. Move the start of the
3336 * old lock to just past the end of the new lock. The
3337 * new lock will be inserted in front of the old, since
3338 * ilop hasn't been updated. (We are done now.)
3339 */
3340 lop->lo_first = new_lop->lo_end;
3341 break;
3342 }
3343 if (new_lop->lo_end >= lop->lo_end) {
3344 /*
3345 * This case is where the new lock overlaps with the
3346 * end of the old lock's range. Move the old lock's
3347 * end to just before the new lock's first and insert
3348 * the new lock after the old lock.
3349 * Might not be done yet, since the new lock could
3350 * overlap further locks with higher ranges.
3351 */
3352 lop->lo_end = new_lop->lo_first;
3353 ilop = lop;
3354 lop = LIST_NEXT(lop, lo_lckowner);
3355 continue;
3356 }
3357 /*
3358 * The final case is where the new lock's range is in the
3359 * middle of the current lock's and splits the current lock
3360 * up. Use *other_lopp to handle the second part of the
3361 * split old lock range. (We are done now.)
3362 * For unlock, we use new_lop as other_lop and tmp, since
3363 * other_lop and new_lop are the same for this case.
3364 * We noted the unlock case above, so we don't need
3365 * new_lop->lo_flags any longer.
3366 */
3367 tmp = new_lop->lo_first;
3368 if (other_lop == NULL) {
3369 if (!unlock)
3370 panic("nfsd srv update unlock");
3371 other_lop = new_lop;
3372 *new_lopp = NULL;
3373 }
3374 other_lop->lo_first = new_lop->lo_end;
3375 other_lop->lo_end = lop->lo_end;
3376 other_lop->lo_flags = lop->lo_flags;
3377 other_lop->lo_stp = stp;
3378 other_lop->lo_lfp = lfp;
3379 lop->lo_end = tmp;
3380 nfsrv_insertlock(other_lop, lop, stp, lfp);
3381 *other_lopp = NULL;
3382 ilop = lop;
3383 break;
3384 }
3385 }
3386 ilop = lop;
3387 lop = LIST_NEXT(lop, lo_lckowner);
3388 if (myfile && (lop == NULL || lop->lo_lfp != lfp))
3389 break;
3390 }
3391
3392 /*
3393 * Insert the new lock in the list at the appropriate place.
3394 */
3395 if (!unlock) {
3396 nfsrv_insertlock(new_lop, ilop, stp, lfp);
3397 *new_lopp = NULL;
3398 }
3399}
3400
3401/*
3402 * This function handles sequencing of locks, etc.
3403 * It returns an error that indicates what the caller should do.
3404 */
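/*
 * As implemented below: seqid == ls_seq + 1 is the normal case and advances
 * the owner's sequence; seqid == ls_seq with a request matching the cached
 * one (xid, length, checksum) is treated as a retransmission and answered
 * from the cache via NFSERR_REPLYFROMCACHE (or NFSERR_DONTREPLY if the
 * cached request is still in progress); anything else gets NFSERR_BADSEQID.
 */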
3405static int
3406nfsrv_checkseqid(struct nfsrv_descript *nd, u_int32_t seqid,
3407 struct nfsstate *stp, struct nfsrvcache *op)
3408{
3409
3410 if (op != nd->nd_rp)
3411 panic("nfsrvstate checkseqid");
3412 if (!(op->rc_flag & RC_INPROG))
3413 panic("nfsrvstate not inprog");
3414 if (stp->ls_op && stp->ls_op->rc_refcnt <= 0) {
3415 printf("refcnt=%d\n", stp->ls_op->rc_refcnt);
3416 panic("nfsrvstate op refcnt");
3417 }
3418 if ((stp->ls_seq + 1) == seqid) {
3419 if (stp->ls_op)
3420 nfsrvd_derefcache(stp->ls_op);
3421 stp->ls_op = op;
3422 nfsrvd_refcache(op);
3423 stp->ls_seq = seqid;
3424 return (0);
3425 } else if (stp->ls_seq == seqid && stp->ls_op &&
3426 op->rc_xid == stp->ls_op->rc_xid &&
3427 op->rc_refcnt == 0 &&
3428 op->rc_reqlen == stp->ls_op->rc_reqlen &&
3429 op->rc_cksum == stp->ls_op->rc_cksum) {
3430 if (stp->ls_op->rc_flag & RC_INPROG)
3431 return (NFSERR_DONTREPLY);
3432 nd->nd_rp = stp->ls_op;
3433 nd->nd_rp->rc_flag |= RC_INPROG;
3434 nfsrvd_delcache(op);
3435 return (NFSERR_REPLYFROMCACHE);
3436 }
3437 return (NFSERR_BADSEQID);
3438}
3439
3440/*
3441 * Get the client ip address for callbacks. If the strings can't be parsed,
3442 * just set lc_program to 0 to indicate no callbacks are possible.
3443 * (For cases where the address can't be parsed or is 0.0.0.0.0.0, set
3444 * the address to the client's transport address. This won't be used
3445 * for callbacks, but can be printed out by newnfsstats for info.)
3446 * Return error if the xdr can't be parsed, 0 otherwise.
3447 */
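/*
 * The callback address uses the NFSv4 universal address form, e.g.
 * r_netid "tcp" with r_addr "192.0.2.7.8.1", where the last two dotted
 * fields encode the port (8 * 256 + 1 = 2049); nfsrv_getipnumber() below
 * converts each decimal field.
 */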
3448APPLESTATIC int
3449nfsrv_getclientipaddr(struct nfsrv_descript *nd, struct nfsclient *clp)
3450{
3451 u_int32_t *tl;
3452 u_char *cp, *cp2;
3453 int i, j;
3454 struct sockaddr_in *rad, *sad;
3455 u_char protocol[5], addr[24];
3456 int error = 0, cantparse = 0;
3457 union {
3458 u_long ival;
3459 u_char cval[4];
3460 } ip;
3461 union {
3462 u_short sval;
3463 u_char cval[2];
3464 } port;
3465
3466 rad = NFSSOCKADDR(clp->lc_req.nr_nam, struct sockaddr_in *);
3467 rad->sin_family = AF_INET;
3468 rad->sin_len = sizeof (struct sockaddr_in);
3469 rad->sin_addr.s_addr = 0;
3470 rad->sin_port = 0;
3471 clp->lc_req.nr_client = NULL;
3472 clp->lc_req.nr_lock = 0;
3473 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3474 i = fxdr_unsigned(int, *tl);
3475 if (i >= 3 && i <= 4) {
3476 error = nfsrv_mtostr(nd, protocol, i);
3477 if (error)
3478 goto nfsmout;
3479 if (!strcmp(protocol, "tcp")) {
3480 clp->lc_flags |= LCL_TCPCALLBACK;
3481 clp->lc_req.nr_sotype = SOCK_STREAM;
3482 clp->lc_req.nr_soproto = IPPROTO_TCP;
3483 } else if (!strcmp(protocol, "udp")) {
3484 clp->lc_req.nr_sotype = SOCK_DGRAM;
3485 clp->lc_req.nr_soproto = IPPROTO_UDP;
3486 } else {
3487 cantparse = 1;
3488 }
3489 } else {
3490 cantparse = 1;
3491 if (i > 0) {
3492 error = nfsm_advance(nd, NFSM_RNDUP(i), -1);
3493 if (error)
3494 goto nfsmout;
3495 }
3496 }
3497 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3498 i = fxdr_unsigned(int, *tl);
3499 if (i < 0) {
3500 error = NFSERR_BADXDR;
3501 goto nfsmout;
3502 } else if (i == 0) {
3503 cantparse = 1;
3504 } else if (!cantparse && i <= 23 && i >= 11) {
3505 error = nfsrv_mtostr(nd, addr, i);
3506 if (error)
3507 goto nfsmout;
3508
3509 /*
3510 * Parse out the address fields. We expect 6 decimal numbers
3511 * separated by '.'s.
3512 */
3513 cp = addr;
3514 i = 0;
3515 while (*cp && i < 6) {
3516 cp2 = cp;
3517 while (*cp2 && *cp2 != '.')
3518 cp2++;
3519 if (*cp2)
3520 *cp2++ = '\0';
3521 else if (i != 5) {
3522 cantparse = 1;
3523 break;
3524 }
3525 j = nfsrv_getipnumber(cp);
3526 if (j >= 0) {
3527 if (i < 4)
3528 ip.cval[3 - i] = j;
3529 else
3530 port.cval[5 - i] = j;
3531 } else {
3532 cantparse = 1;
3533 break;
3534 }
3535 cp = cp2;
3536 i++;
3537 }
3538 if (!cantparse) {
3539 if (ip.ival != 0x0) {
3540 rad->sin_addr.s_addr = htonl(ip.ival);
3541 rad->sin_port = htons(port.sval);
3542 } else {
3543 cantparse = 1;
3544 }
3545 }
3546 } else {
3547 cantparse = 1;
3548 if (i > 0) {
3549 error = nfsm_advance(nd, NFSM_RNDUP(i), -1);
3550 if (error)
3551 goto nfsmout;
3552 }
3553 }
3554 if (cantparse) {
3555 sad = NFSSOCKADDR(nd->nd_nam, struct sockaddr_in *);
3556 rad->sin_addr.s_addr = sad->sin_addr.s_addr;
3557 rad->sin_port = 0x0;
3558 clp->lc_program = 0;
3559 }
3560nfsmout:
3561 return (error);
3562}
3563
3564/*
3565 * Turn a string of up to three decimal digits into a number. Return -1 upon
3566 * error.
3567 */
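/*
 * For example, "204" yields 204, while "256", "1234" (too many digits) and
 * "2a" all yield -1.
 */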
3568static int
3569nfsrv_getipnumber(u_char *cp)
3570{
3571 int i = 0, j = 0;
3572
3573 while (*cp) {
3574 if (j > 2 || *cp < '0' || *cp > '9')
3575 return (-1);
3576 i *= 10;
3577 i += (*cp - '0');
3578 cp++;
3579 j++;
3580 }
3581 if (i < 256)
3582 return (i);
3583 return (-1);
3584}
3585
3586/*
3587 * This function checks for restart conditions.
3588 */
3589static int
3590nfsrv_checkrestart(nfsquad_t clientid, u_int32_t flags,
3591 nfsv4stateid_t *stateidp, int specialid)
3592{
3593 int ret;
3594
3595 /*
3596 * First check for a server restart. Open, LockT, ReleaseLockOwner
3597 * and DelegPurge have a clientid, the rest a stateid.
3598 */
3599 if (flags &
3600 (NFSLCK_OPEN | NFSLCK_TEST | NFSLCK_RELEASE | NFSLCK_DELEGPURGE)) {
3601 if (clientid.lval[0] != nfsrvboottime)
3602 return (NFSERR_STALECLIENTID);
3603 } else if (stateidp->other[0] != nfsrvboottime &&
3604 specialid == 0)
3605 return (NFSERR_STALESTATEID);
3606
3607 /*
3608 * Read, Write, Setattr and LockT can return NFSERR_GRACE and do
3609 * not use a lock/open owner seqid#, so the check can be done now.
3610 * (The others will be checked, as required, later.)
3611 */
3612 if (!(flags & (NFSLCK_CHECK | NFSLCK_TEST)))
3613 return (0);
3614
3615 NFSLOCKSTATE();
3616 ret = nfsrv_checkgrace(flags);
3617 NFSUNLOCKSTATE();
3618 return (ret);
3619}
3620
3621/*
3622 * Check for grace.
3623 */
3624static int
3625nfsrv_checkgrace(u_int32_t flags)
3626{
3627
3628 if (nfsrv_stablefirst.nsf_flags & NFSNSF_GRACEOVER) {
3629 if (flags & NFSLCK_RECLAIM)
3630 return (NFSERR_NOGRACE);
3631 } else {
3632 if (!(flags & NFSLCK_RECLAIM))
3633 return (NFSERR_GRACE);
3634
3635 /*
3636 * If grace is almost over and we are still getting Reclaims,
3637 * extend grace a bit.
3638 */
3639 if ((NFSD_MONOSEC + NFSRV_LEASEDELTA) >
3640 nfsrv_stablefirst.nsf_eograce)
3641 nfsrv_stablefirst.nsf_eograce = NFSD_MONOSEC +
3642 NFSRV_LEASEDELTA;
3643 }
3644 return (0);
3645}
3646
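/*
 * Illustrative sketch (not part of the nfsd sources): the grace test
 * above boils down to a two-by-two decision table.  While grace is in
 * effect only reclaims are allowed and everything else gets
 * NFSERR_GRACE; once grace is over, reclaims are refused with
 * NFSERR_NOGRACE.  The enum values below are stand-ins for the real
 * NFSERR_* codes.  Compiled out with #if 0.
 */
#if 0
enum grace_result { GR_OK = 0, GR_ERR_GRACE, GR_ERR_NOGRACE };

static enum grace_result
grace_check(int grace_over, int is_reclaim)
{

	if (grace_over)
		return (is_reclaim ? GR_ERR_NOGRACE : GR_OK);
	return (is_reclaim ? GR_OK : GR_ERR_GRACE);
}
#endif
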
3647/*
3648 * Do a server callback.
3649 */
3650static int
3651nfsrv_docallback(struct nfsclient *clp, int procnum,
3652 nfsv4stateid_t *stateidp, int trunc, fhandle_t *fhp,
3653 struct nfsvattr *nap, nfsattrbit_t *attrbitp, NFSPROC_T *p)
3654{
3655 mbuf_t m;
3656 u_int32_t *tl;
3657 struct nfsrv_descript nfsd, *nd = &nfsd;
3658 struct ucred *cred;
3659 int error = 0;
3660 u_int32_t callback;
3661
3662 cred = newnfs_getcred();
3663 NFSLOCKSTATE(); /* mostly for lc_cbref++ */
3664 if (clp->lc_flags & LCL_NEEDSCONFIRM) {
3665 NFSUNLOCKSTATE();
3666 panic("docallb");
3667 }
3668 clp->lc_cbref++;
3669
3670 /*
3671 * Fill the callback program# and version into the request
3672 * structure for newnfs_connect() to use.
3673 */
3674 clp->lc_req.nr_prog = clp->lc_program;
3675 clp->lc_req.nr_vers = NFSV4_CBVERS;
3676
3677 /*
3678 * First, fill in some of the fields of nd and cr.
3679 */
3680 nd->nd_flag = ND_NFSV4;
3681 if (clp->lc_flags & LCL_GSS)
3682 nd->nd_flag |= ND_KERBV;
3683 nd->nd_repstat = 0;
3684 cred->cr_uid = clp->lc_uid;
3685 cred->cr_gid = clp->lc_gid;
3686 callback = clp->lc_callback;
3687 NFSUNLOCKSTATE();
3688 cred->cr_ngroups = 1;
3689
3690 /*
3691 * Get the first mbuf for the request.
3692 */
3693 MGET(m, M_WAIT, MT_DATA);
3694 mbuf_setlen(m, 0);
3695 nd->nd_mreq = nd->nd_mb = m;
3696 nd->nd_bpos = NFSMTOD(m, caddr_t);
3697
3698 /*
3699 * and build the callback request.
3700 */
3701 if (procnum == NFSV4OP_CBGETATTR) {
3702 nd->nd_procnum = NFSV4PROC_CBCOMPOUND;
3703 (void) nfsm_strtom(nd, "CB Getattr", 10);
3704 NFSM_BUILD(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
3705 *tl++ = txdr_unsigned(NFSV4_MINORVERSION);
3706 *tl++ = txdr_unsigned(callback);
3707 *tl++ = txdr_unsigned(1);
3708 *tl = txdr_unsigned(NFSV4OP_CBGETATTR);
3709 (void) nfsm_fhtom(nd, (u_int8_t *)fhp, NFSX_MYFH, 0);
3710 (void) nfsrv_putattrbit(nd, attrbitp);
3711 } else if (procnum == NFSV4OP_CBRECALL) {
3712 nd->nd_procnum = NFSV4PROC_CBCOMPOUND;
3713 (void) nfsm_strtom(nd, "CB Recall", 9);
3714 NFSM_BUILD(tl, u_int32_t *, 5 * NFSX_UNSIGNED + NFSX_STATEID);
3715 *tl++ = txdr_unsigned(NFSV4_MINORVERSION);
3716 *tl++ = txdr_unsigned(callback);
3717 *tl++ = txdr_unsigned(1);
3718 *tl++ = txdr_unsigned(NFSV4OP_CBRECALL);
3719 *tl++ = txdr_unsigned(stateidp->seqid);
3720 NFSBCOPY((caddr_t)stateidp->other, (caddr_t)tl,
3721 NFSX_STATEIDOTHER);
3722 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED);
3723 if (trunc)
3724 *tl = newnfs_true;
3725 else
3726 *tl = newnfs_false;
3727 (void) nfsm_fhtom(nd, (u_int8_t *)fhp, NFSX_MYFH, 0);
3728 } else {
3729 nd->nd_procnum = NFSV4PROC_CBNULL;
3730 }
3731
3732 /*
3733 * Call newnfs_connect(), as required, and then newnfs_request().
3734 */
3735 (void) newnfs_sndlock(&clp->lc_req.nr_lock);
3736 if (clp->lc_req.nr_client == NULL) {
3737 if (nd->nd_procnum == NFSV4PROC_CBNULL)
3738 error = newnfs_connect(NULL, &clp->lc_req, cred,
3739 NULL, 1);
3740 else
3741 error = newnfs_connect(NULL, &clp->lc_req, cred,
3742 NULL, 3);
3743 }
3744 newnfs_sndunlock(&clp->lc_req.nr_lock);
3745 if (!error) {
3746 error = newnfs_request(nd, NULL, clp, &clp->lc_req, NULL,
3747 NULL, cred, clp->lc_program, NFSV4_CBVERS, NULL, 1, NULL);
3748 }
3749 NFSFREECRED(cred);
3750
3751 /*
3752 * If error is set here, the Callback path isn't working
3753 * properly, so twiddle the appropriate LCL_ flags.
3754 * (nd_repstat != 0 indicates the Callback path is working,
3755 * but the callback failed on the client.)
3756 */
3757 if (error) {
3758 /*
3759		 * Mark the callback pathway down, which disables issuing
3760 * of delegations and gets Renew to return NFSERR_CBPATHDOWN.
3761 */
3762 NFSLOCKSTATE();
3763 clp->lc_flags |= LCL_CBDOWN;
3764 NFSUNLOCKSTATE();
3765 } else {
3766 /*
3767 * Callback worked. If the callback path was down, disable
3768 * callbacks, so no more delegations will be issued. (This
3769 * is done on the assumption that the callback pathway is
3770 * flakey.)
3771 */
3772 NFSLOCKSTATE();
3773 if (clp->lc_flags & LCL_CBDOWN)
3774 clp->lc_flags &= ~(LCL_CBDOWN | LCL_CALLBACKSON);
3775 NFSUNLOCKSTATE();
3776 if (nd->nd_repstat)
3777 error = nd->nd_repstat;
3778 else if (procnum == NFSV4OP_CBGETATTR)
3779 error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0,
3780 NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL, NULL,
3781 p, NULL);
3782 mbuf_freem(nd->nd_mrep);
3783 }
3784 NFSLOCKSTATE();
3785 clp->lc_cbref--;
3786 if ((clp->lc_flags & LCL_WAKEUPWANTED) && clp->lc_cbref == 0) {
3787 clp->lc_flags &= ~LCL_WAKEUPWANTED;
3788 NFSUNLOCKSTATE();
3789 wakeup((caddr_t)clp);
3790 } else {
3791 NFSUNLOCKSTATE();
3792 }
3793 return (error);
3794}
3795
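/*
 * Illustrative sketch (not part of the nfsd sources): the CB_RECALL
 * op built above consists of the stateid (a 32-bit seqid followed by
 * 12 opaque bytes), a truncate boolean and the file handle as an XDR
 * opaque (length, data, padding to a 4-byte boundary).  The
 * hypothetical encode_cbrecall() below writes that layout into a flat
 * buffer; the real code streams it into mbufs with NFSM_BUILD() and
 * nfsm_fhtom().  The caller is assumed to supply a buffer large
 * enough for 24 + fhlen + 3 bytes.  Compiled out with #if 0.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct cb_stateid {
	uint32_t	seqid;
	uint8_t		other[12];
};

static size_t
encode_cbrecall(uint8_t *buf, const struct cb_stateid *sid, int trunc,
    const uint8_t *fh, uint32_t fhlen)
{
	uint8_t *p = buf;
	uint32_t v;

	v = htonl(sid->seqid);
	memcpy(p, &v, 4); p += 4;
	memcpy(p, sid->other, 12); p += 12;
	v = htonl(trunc ? 1 : 0);
	memcpy(p, &v, 4); p += 4;
	v = htonl(fhlen);			/* opaque length */
	memcpy(p, &v, 4); p += 4;
	memcpy(p, fh, fhlen); p += fhlen;
	while ((p - buf) & 3)			/* XDR pad */
		*p++ = 0;
	return ((size_t)(p - buf));
}
#endif
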
3796/*
3797 * Return the next index# for a clientid. Mostly just increment and return
3798 * the next one, but... if the 32bit unsigned does actually wrap around,
3799 * the server should be rebooted.
3800 * At an average rate of one new client per second, it will wrap around in
3801 * approximately 136 years. (I think the server will have been shut
3802 * down or rebooted before then.)
3803 */
3804static u_int32_t
3805nfsrv_nextclientindex(void)
3806{
3807 static u_int32_t client_index = 0;
3808
3809 client_index++;
3810 if (client_index != 0)
3811 return (client_index);
3812
3813 printf("%s: out of clientids\n", __func__);
3814 return (client_index);
3815}
3816
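/*
 * Illustrative sketch (not part of the nfsd sources): a quick check
 * of the wrap-around estimate in the comment above.  Compiled out
 * with #if 0.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	/* 2^32 new clients at one per second, in Julian years. */
	double years = 4294967296.0 / (60.0 * 60.0 * 24.0 * 365.25);

	printf("2^32 seconds is about %.1f years\n", years);	/* ~136.1 */
	return (0);
}
#endif
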
3817/*
3818 * Return the next index# for a stateid. Mostly just increment and return
3819 * the next one, but... if the 32bit unsigned does actually wrap around
3820 * (will a BSD server stay up that long?), find
3821 * new start and end values.
3822 */
3823static u_int32_t
3824nfsrv_nextstateindex(struct nfsclient *clp)
3825{
3826 struct nfsstate *stp;
3827 int i;
3828 u_int32_t canuse, min_index, max_index;
3829
3830 if (!(clp->lc_flags & LCL_INDEXNOTOK)) {
3831 clp->lc_stateindex++;
3832 if (clp->lc_stateindex != clp->lc_statemaxindex)
3833 return (clp->lc_stateindex);
3834 }
3835
3836 /*
3837 * Yuck, we've hit the end.
3838 * Look for a new min and max.
3839 */
3840 min_index = 0;
3841 max_index = 0xffffffff;
3842 for (i = 0; i < NFSSTATEHASHSIZE; i++) {
3843 LIST_FOREACH(stp, &clp->lc_stateid[i], ls_hash) {
3844 if (stp->ls_stateid.other[2] > 0x80000000) {
3845 if (stp->ls_stateid.other[2] < max_index)
3846 max_index = stp->ls_stateid.other[2];
3847 } else {
3848 if (stp->ls_stateid.other[2] > min_index)
3849 min_index = stp->ls_stateid.other[2];
3850 }
3851 }
3852 }
3853
3854 /*
3855 * Yikes, highly unlikely, but I'll handle it anyhow.
3856 */
3857 if (min_index == 0x80000000 && max_index == 0x80000001) {
3858 canuse = 0;
3859 /*
3860 * Loop around until we find an unused entry. Return that
3861 * and set LCL_INDEXNOTOK, so the search will continue next time.
3862 * (This is one of those rare cases where a goto is the
3863 * cleanest way to code the loop.)
3864 */
3865tryagain:
3866 for (i = 0; i < NFSSTATEHASHSIZE; i++) {
3867 LIST_FOREACH(stp, &clp->lc_stateid[i], ls_hash) {
3868 if (stp->ls_stateid.other[2] == canuse) {
3869 canuse++;
3870 goto tryagain;
3871 }
3872 }
3873 }
3874 clp->lc_flags |= LCL_INDEXNOTOK;
3875 return (canuse);
3876 }
3877
3878 /*
3879 * Ok to start again from min + 1.
3880 */
3881 clp->lc_stateindex = min_index + 1;
3882 clp->lc_statemaxindex = max_index;
3883 clp->lc_flags &= ~LCL_INDEXNOTOK;
3884 return (clp->lc_stateindex);
3885}
3886
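/*
 * Illustrative sketch (not part of the nfsd sources): the window
 * recomputation above, in isolation.  Given the set of stateid index
 * values still in use, find the largest one in the low half and the
 * smallest one in the high half of the 32-bit space; new indices can
 * then be issued from lo + 1 up to (but not including) hi.  The
 * hypothetical index_window() below works over a plain array instead
 * of the per-client hash lists.  Compiled out with #if 0.
 */
#if 0
#include <stdint.h>

static void
index_window(const uint32_t *used, int n, uint32_t *lop, uint32_t *hip)
{
	uint32_t lo = 0, hi = 0xffffffff;
	int i;

	for (i = 0; i < n; i++) {
		if (used[i] > 0x80000000) {
			if (used[i] < hi)
				hi = used[i];
		} else {
			if (used[i] > lo)
				lo = used[i];
		}
	}
	*lop = lo;
	*hip = hi;
}
#endif
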
3887/*
3888 * The following functions handle the stable storage file that deals with
3889 * the edge conditions described in RFC3530 Sec. 8.6.3.
3890 * The file is as follows:
3891 * - a single record at the beginning that has the lease time of the
3892 * previous server instance (before the last reboot) and the nfsrvboottime
3893 * values for the previous server boots.
3894 * These previous boot times are used to ensure that the current
3895 * nfsrvboottime does not, somehow, get set to a previous one.
3896 * (This is important so that Stale ClientIDs and StateIDs can
3897 * be recognized.)
3898 *	The number of previous nfsrvboottime values precedes the list.
3899 * - followed by some number of appended records with:
3900 * - client id string
3901 * - flag that indicates it is a record revoking state via lease
3902 * expiration or similar
3903 * OR has successfully acquired state.
3904 * These structures vary in length, with the client string at the end, up
3905 * to NFSV4_OPAQUELIMIT in size.
3906 *
3907 * At the end of the grace period, the file is truncated, the first
3908 * record is rewritten with updated information and any acquired state
3909 * records for successful reclaims of state are written.
3910 *
3911 * Subsequent records are appended when the first state is issued to
3912 * a client and when state is revoked for a client.
3913 *
3914 * When reading the file in, state issued records that come later in
3915 * the file override older ones, since the append log is in chronological order.
3916 * If, for some reason, the file can't be read, the grace period is
3917 * immediately terminated and all reclaims get NFSERR_NOGRACE.
3918 */
3919
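/*
 * Illustrative sketch (not part of the nfsd sources): the appended
 * part of the file described above is an append log of
 * variable-length records, each a small fixed head followed by the
 * client id string.  The hypothetical reader below shows the shape of
 * that loop over a stdio stream with a simplified record head; the
 * kernel code in nfsrv_setupstable() does the same with NFSD_RDWR()
 * reads and struct nfst_rec, and lets the latest record for a client
 * win.  Compiled out with #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

struct rec_head {
	uint16_t	len;	/* length of the client id that follows */
	uint8_t		flag;	/* revoked vs. acquired state */
};

static void
read_log(FILE *fp)
{
	struct rec_head hd;
	uint8_t client[1024];

	while (fread(&hd, sizeof(hd), 1, fp) == 1) {
		if (hd.len == 0 || hd.len > sizeof(client))
			break;		/* corrupt record; give up */
		if (fread(client, hd.len, 1, fp) != 1)
			break;		/* short read; give up */
		/* ... find the client and apply or clear the flag ... */
	}
}
#endif
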
3920/*
3921 * Read in the stable storage file. Called by nfssvc() before the nfsd
3922 * processes start servicing requests.
3923 */
3924APPLESTATIC void
3925nfsrv_setupstable(NFSPROC_T *p)
3926{
3927 struct nfsrv_stablefirst *sf = &nfsrv_stablefirst;
3928 struct nfsrv_stable *sp, *nsp;
3929 struct nfst_rec *tsp;
3930 int error, i, tryagain;
3931 off_t off = 0;
3932 int aresid, len;
3933 struct timeval curtime;
3934
3935 /*
3936 * If NFSNSF_UPDATEDONE is set, this is a restart of the nfsds without
3937 * a reboot, so state has not been lost.
3938 */
3939 if (sf->nsf_flags & NFSNSF_UPDATEDONE)
3940 return;
3941 /*
3942 * Set Grace over just until the file reads successfully.
3943 */
3944 NFSGETTIME(&curtime);
3945 nfsrvboottime = curtime.tv_sec;
3946 LIST_INIT(&sf->nsf_head);
3947 sf->nsf_flags = (NFSNSF_GRACEOVER | NFSNSF_NEEDLOCK);
3948 sf->nsf_eograce = NFSD_MONOSEC + NFSRV_LEASEDELTA;
3949 if (sf->nsf_fp == NULL)
3950 return;
3951 error = NFSD_RDWR(UIO_READ, NFSFPVNODE(sf->nsf_fp),
3952 (caddr_t)&sf->nsf_rec, sizeof (struct nfsf_rec), off, UIO_SYSSPACE,
3953 0, NFSFPCRED(sf->nsf_fp), &aresid, p);
3954 if (error || aresid || sf->nsf_numboots == 0 ||
3955 sf->nsf_numboots > NFSNSF_MAXNUMBOOTS)
3956 return;
3957
3958 /*
3959 * Now, read in the boottimes.
3960 */
3961 sf->nsf_bootvals = (time_t *)malloc((sf->nsf_numboots + 1) *
3962 sizeof (time_t), M_TEMP, M_WAITOK);
3963 off = sizeof (struct nfsf_rec);
3964 error = NFSD_RDWR(UIO_READ, NFSFPVNODE(sf->nsf_fp),
3965 (caddr_t)sf->nsf_bootvals, sf->nsf_numboots * sizeof (time_t), off,
3966 UIO_SYSSPACE, 0, NFSFPCRED(sf->nsf_fp), &aresid, p);
3967 if (error || aresid) {
3968 free((caddr_t)sf->nsf_bootvals, M_TEMP);
3969 sf->nsf_bootvals = NULL;
3970 return;
3971 }
3972
3973 /*
3974 * Make sure this nfsrvboottime is different from all recorded
3975 * previous ones.
3976 */
3977 do {
3978 tryagain = 0;
3979 for (i = 0; i < sf->nsf_numboots; i++) {
3980 if (nfsrvboottime == sf->nsf_bootvals[i]) {
3981 nfsrvboottime++;
3982 tryagain = 1;
3983 break;
3984 }
3985 }
3986 } while (tryagain);
3987
3988 sf->nsf_flags |= NFSNSF_OK;
3989 off += (sf->nsf_numboots * sizeof (time_t));
3990
3991 /*
3992 * Read through the file, building a list of records for grace
3993 * checking.
3994 * Each record is between sizeof (struct nfst_rec) and
3995 * sizeof (struct nfst_rec) + NFSV4_OPAQUELIMIT - 1
3996 * and is actually sizeof (struct nfst_rec) + nst_len - 1.
3997 */
3998 tsp = (struct nfst_rec *)malloc(sizeof (struct nfst_rec) +
3999 NFSV4_OPAQUELIMIT - 1, M_TEMP, M_WAITOK);
4000 do {
4001 error = NFSD_RDWR(UIO_READ, NFSFPVNODE(sf->nsf_fp),
4002 (caddr_t)tsp, sizeof (struct nfst_rec) + NFSV4_OPAQUELIMIT - 1,
4003 off, UIO_SYSSPACE, 0, NFSFPCRED(sf->nsf_fp), &aresid, p);
4004 len = (sizeof (struct nfst_rec) + NFSV4_OPAQUELIMIT - 1) - aresid;
4005 if (error || (len > 0 && (len < sizeof (struct nfst_rec) ||
4006 len < (sizeof (struct nfst_rec) + tsp->len - 1)))) {
4007 /*
4008 * Yuck, the file has been corrupted, so just return
4009			 * after clearing out any restart state, which ends the
4010			 * grace period.
4011 */
4012 LIST_FOREACH_SAFE(sp, &sf->nsf_head, nst_list, nsp) {
4013 LIST_REMOVE(sp, nst_list);
4014 free((caddr_t)sp, M_TEMP);
4015 }
4016 free((caddr_t)tsp, M_TEMP);
4017 sf->nsf_flags &= ~NFSNSF_OK;
4018 free((caddr_t)sf->nsf_bootvals, M_TEMP);
4019 sf->nsf_bootvals = NULL;
4020 return;
4021 }
4022 if (len > 0) {
4023 off += sizeof (struct nfst_rec) + tsp->len - 1;
4024 /*
4025 * Search the list for a matching client.
4026 */
4027 LIST_FOREACH(sp, &sf->nsf_head, nst_list) {
4028 if (tsp->len == sp->nst_len &&
4029 !NFSBCMP(tsp->client, sp->nst_client, tsp->len))
4030 break;
4031 }
4032 if (sp == LIST_END(&sf->nsf_head)) {
4033 sp = (struct nfsrv_stable *)malloc(tsp->len +
4034 sizeof (struct nfsrv_stable) - 1, M_TEMP,
4035 M_WAITOK);
4036 NFSBCOPY((caddr_t)tsp, (caddr_t)&sp->nst_rec,
4037 sizeof (struct nfst_rec) + tsp->len - 1);
4038 LIST_INSERT_HEAD(&sf->nsf_head, sp, nst_list);
4039 } else {
4040 if (tsp->flag == NFSNST_REVOKE)
4041 sp->nst_flag |= NFSNST_REVOKE;
4042 else
4043 /*
4044 * A subsequent timestamp indicates the client
4045 * did a setclientid/confirm and any previous
4046 * revoke is no longer relevant.
4047 */
4048 sp->nst_flag &= ~NFSNST_REVOKE;
4049 }
4050 }
4051 } while (len > 0);
4052 free((caddr_t)tsp, M_TEMP);
4053 sf->nsf_flags = NFSNSF_OK;
4054 sf->nsf_eograce = NFSD_MONOSEC + sf->nsf_lease +
4055 NFSRV_LEASEDELTA;
4056}
4057
4058/*
4059 * Update the stable storage file, now that the grace period is over.
4060 */
4061APPLESTATIC void
4062nfsrv_updatestable(NFSPROC_T *p)
4063{
4064 struct nfsrv_stablefirst *sf = &nfsrv_stablefirst;
4065 struct nfsrv_stable *sp, *nsp;
4066 int i;
4067 struct nfsvattr nva;
4068 vnode_t vp;
4069#if defined(__FreeBSD_version) && (__FreeBSD_version >= 500000)
4070 mount_t mp = NULL;
4071#endif
4072 int error;
4073
4074 if (sf->nsf_fp == NULL || (sf->nsf_flags & NFSNSF_UPDATEDONE))
4075 return;
4076 sf->nsf_flags |= NFSNSF_UPDATEDONE;
4077 /*
4078 * Ok, we need to rewrite the stable storage file.
4079 * - truncate to 0 length
4080 * - write the new first structure
4081 * - loop through the data structures, writing out any that
4082 * have timestamps older than the old boot
4083 */
4084 if (sf->nsf_bootvals) {
4085 sf->nsf_numboots++;
4086 for (i = sf->nsf_numboots - 2; i >= 0; i--)
4087 sf->nsf_bootvals[i + 1] = sf->nsf_bootvals[i];
4088 } else {
4089 sf->nsf_numboots = 1;
4090 sf->nsf_bootvals = (time_t *)malloc(sizeof (time_t),
4091 M_TEMP, M_WAITOK);
4092 }
4093 sf->nsf_bootvals[0] = nfsrvboottime;
4094 sf->nsf_lease = nfsrv_lease;
4095 NFSVNO_ATTRINIT(&nva);
4096 NFSVNO_SETATTRVAL(&nva, size, 0);
4097 vp = NFSFPVNODE(sf->nsf_fp);
4098 vn_start_write(vp, &mp, V_WAIT);
4099 if (NFSVOPLOCK(vp, LK_EXCLUSIVE) == 0) {
4100 error = nfsvno_setattr(vp, &nva, NFSFPCRED(sf->nsf_fp), p,
4101 NULL);
4102 NFSVOPUNLOCK(vp, 0);
4103 } else
4104 error = EPERM;
4105 vn_finished_write(mp);
4106 if (!error)
4107 error = NFSD_RDWR(UIO_WRITE, vp,
4108 (caddr_t)&sf->nsf_rec, sizeof (struct nfsf_rec), (off_t)0,
4109 UIO_SYSSPACE, IO_SYNC, NFSFPCRED(sf->nsf_fp), NULL, p);
4110 if (!error)
4111 error = NFSD_RDWR(UIO_WRITE, vp,
4112 (caddr_t)sf->nsf_bootvals,
4113 sf->nsf_numboots * sizeof (time_t),
4114 (off_t)(sizeof (struct nfsf_rec)),
4115 UIO_SYSSPACE, IO_SYNC, NFSFPCRED(sf->nsf_fp), NULL, p);
4116 free((caddr_t)sf->nsf_bootvals, M_TEMP);
4117 sf->nsf_bootvals = NULL;
4118 if (error) {
4119 sf->nsf_flags &= ~NFSNSF_OK;
4120 printf("EEK! Can't write NfsV4 stable storage file\n");
4121 return;
4122 }
4123 sf->nsf_flags |= NFSNSF_OK;
4124
4125 /*
4126 * Loop through the list and write out timestamp records for
4127 * any clients that successfully reclaimed state.
4128 */
4129 LIST_FOREACH_SAFE(sp, &sf->nsf_head, nst_list, nsp) {
4130 if (sp->nst_flag & NFSNST_GOTSTATE) {
4131 nfsrv_writestable(sp->nst_client, sp->nst_len,
4132 NFSNST_NEWSTATE, p);
4133 sp->nst_clp->lc_flags |= LCL_STAMPEDSTABLE;
4134 }
4135 LIST_REMOVE(sp, nst_list);
4136 free((caddr_t)sp, M_TEMP);
4137 }
4138 nfsrv_backupstable();
4139}
4140
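/*
 * Illustrative sketch (not part of the nfsd sources): the boot-time
 * bookkeeping done above, in isolation.  The existing entries are
 * shifted down one slot and the current boot time is recorded at the
 * head, so index 0 always holds the most recent boot.  "vals" is
 * assumed to have room for n + 1 entries.  Compiled out with #if 0.
 */
#if 0
#include <time.h>

static int
prepend_boottime(time_t *vals, int n, time_t boottime)
{
	int i;

	for (i = n - 1; i >= 0; i--)
		vals[i + 1] = vals[i];
	vals[0] = boottime;
	return (n + 1);
}
#endif
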
4141/*
4142 * Append a record to the stable storage file.
4143 */
4144APPLESTATIC void
4145nfsrv_writestable(u_char *client, int len, int flag, NFSPROC_T *p)
4146{
4147 struct nfsrv_stablefirst *sf = &nfsrv_stablefirst;
4148 struct nfst_rec *sp;
4149 int error;
4150
4151 if (!(sf->nsf_flags & NFSNSF_OK) || sf->nsf_fp == NULL)
4152 return;
4153 sp = (struct nfst_rec *)malloc(sizeof (struct nfst_rec) +
4154 len - 1, M_TEMP, M_WAITOK);
4155 sp->len = len;
4156 NFSBCOPY(client, sp->client, len);
4157 sp->flag = flag;
4158 error = NFSD_RDWR(UIO_WRITE, NFSFPVNODE(sf->nsf_fp),
4159 (caddr_t)sp, sizeof (struct nfst_rec) + len - 1, (off_t)0,
4160 UIO_SYSSPACE, (IO_SYNC | IO_APPEND), NFSFPCRED(sf->nsf_fp), NULL, p);
4161 free((caddr_t)sp, M_TEMP);
4162 if (error) {
4163 sf->nsf_flags &= ~NFSNSF_OK;
4164 printf("EEK! Can't write NfsV4 stable storage file\n");
4165 }
4166}
4167
4168/*
4169 * This function is called during the grace period to mark a client
4170 * that successfully reclaimed state.
4171 */
4172static void
4173nfsrv_markstable(struct nfsclient *clp)
4174{
4175 struct nfsrv_stable *sp;
4176
4177 /*
4178 * First find the client structure.
4179 */
4180 LIST_FOREACH(sp, &nfsrv_stablefirst.nsf_head, nst_list) {
4181 if (sp->nst_len == clp->lc_idlen &&
4182 !NFSBCMP(sp->nst_client, clp->lc_id, sp->nst_len))
4183 break;
4184 }
4185 if (sp == LIST_END(&nfsrv_stablefirst.nsf_head))
4186 return;
4187
4188 /*
4189 * Now, just mark it and set the nfsclient back pointer.
4190 */
4191 sp->nst_flag |= NFSNST_GOTSTATE;
4192 sp->nst_clp = clp;
4193}
4194
4195/*
4196 * This function is called for a reclaim, to see if it gets grace.
4197 * It returns 0 if a reclaim is allowed, 1 otherwise.
4198 */
4199static int
4200nfsrv_checkstable(struct nfsclient *clp)
4201{
4202 struct nfsrv_stable *sp;
4203
4204 /*
4205 * First, find the entry for the client.
4206 */
4207 LIST_FOREACH(sp, &nfsrv_stablefirst.nsf_head, nst_list) {
4208 if (sp->nst_len == clp->lc_idlen &&
4209 !NFSBCMP(sp->nst_client, clp->lc_id, sp->nst_len))
4210 break;
4211 }
4212
4213 /*
4214	 * If it is not in the list, state was revoked or no state was issued
4215	 * since the previous reboot, so the reclaim is denied.
4216 */
4217 if (sp == LIST_END(&nfsrv_stablefirst.nsf_head) ||
4218 (sp->nst_flag & NFSNST_REVOKE) ||
4219 !(nfsrv_stablefirst.nsf_flags & NFSNSF_OK))
4220 return (1);
4221 return (0);
4222}
4223
4224/*
4225 * Test for and try to clear out a conflicting client. This is called by
4226 * nfsrv_lockctrl() and nfsrv_openctrl() when conflicts with other clients
4227 * are found.
4228 * The trick here is that it can't revoke a conflicting client with an
4229 * expired lease unless it holds the v4root lock, so...
4230 * If no v4root lock, get the lock and return 1 to indicate "try again".
4231 * Return 0 to indicate the conflict can't be revoked and 1 to indicate
4232 * the revocation worked and the conflicting client is "bye, bye", so it
4233 * can be tried again.
4234 * Return 2 to indicate that the vnode is VI_DOOMED after NFSVOPLOCK().
4235 * Unlocks State before a non-zero value is returned.
4236 */
4237static int
4238nfsrv_clientconflict(struct nfsclient *clp, int *haslockp, vnode_t vp,
4239 NFSPROC_T *p)
4240{
4241 int gotlock, lktype;
4242
4243 /*
4244 * If lease hasn't expired, we can't fix it.
4245 */
4246 if (clp->lc_expiry >= NFSD_MONOSEC ||
4247 !(nfsrv_stablefirst.nsf_flags & NFSNSF_UPDATEDONE))
4248 return (0);
4249 if (*haslockp == 0) {
4250 NFSUNLOCKSTATE();
4251 lktype = NFSVOPISLOCKED(vp);
4252 NFSVOPUNLOCK(vp, 0);
4253 NFSLOCKV4ROOTMUTEX();
4254 nfsv4_relref(&nfsv4rootfs_lock);
4255 do {
4256 gotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL,
4257 NFSV4ROOTLOCKMUTEXPTR, NULL);
4258 } while (!gotlock);
4259 NFSUNLOCKV4ROOTMUTEX();
4260 *haslockp = 1;
4261 NFSVOPLOCK(vp, lktype | LK_RETRY);
4262 if ((vp->v_iflag & VI_DOOMED) != 0)
4263 return (2);
4264 else
4265 return (1);
4266 }
4267 NFSUNLOCKSTATE();
4268
4269 /*
4270 * Ok, we can expire the conflicting client.
4271 */
4272 nfsrv_writestable(clp->lc_id, clp->lc_idlen, NFSNST_REVOKE, p);
4273 nfsrv_backupstable();
4274 nfsrv_cleanclient(clp, p);
4275 nfsrv_freedeleglist(&clp->lc_deleg);
4276 nfsrv_freedeleglist(&clp->lc_olddeleg);
4277 LIST_REMOVE(clp, lc_hash);
4278 nfsrv_zapclient(clp, p);
4279 return (1);
4280}
4281
4282/*
4283 * Resolve a delegation conflict.
4284 * Returns 0 to indicate the conflict was resolved without sleeping.
4285 * Return -1 to indicate that the caller should check for conflicts again.
4286 * Return > 0 for an error that should be returned, normally NFSERR_DELAY.
4287 *
4288 * Also, manipulate the nfsv4root_lock, as required. It isn't changed
4289 * for a return of 0, since there was no sleep and it could be required
4290 * later. It is released for a return of NFSERR_DELAY, since the caller
4291 * will return that error. It is released when a sleep was done waiting
4292 * for the delegation to be returned or expire (so that other nfsds can
4293 * handle ops). Then, it must be acquired for the write to stable storage.
4294 * (This function is somewhat similar to nfsrv_clientconflict(), but
4295 * the semantics differ in a couple of subtle ways. The return of 0
4296 * indicates the conflict was resolved without sleeping here, not
4297 * that the conflict can't be resolved, and the handling of nfsv4root_lock
4298 * differs, as noted above.)
4299 * Unlocks State before returning a non-zero value.
4300 */
4301static int
4302nfsrv_delegconflict(struct nfsstate *stp, int *haslockp, NFSPROC_T *p,
4303 vnode_t vp)
4304{
4305 struct nfsclient *clp = stp->ls_clp;
4306 int gotlock, error, lktype, retrycnt, zapped_clp;
4307 nfsv4stateid_t tstateid;
4308 fhandle_t tfh;
4309
4310 /*
4311 * If the conflict is with an old delegation...
4312 */
4313 if (stp->ls_flags & NFSLCK_OLDDELEG) {
4314 /*
4315 * You can delete it, if it has expired.
4316 */
4317 if (clp->lc_delegtime < NFSD_MONOSEC) {
4318 nfsrv_freedeleg(stp);
4319 NFSUNLOCKSTATE();
4320 return (-1);
4321 }
4322 NFSUNLOCKSTATE();
4323 /*
4324 * During this delay, the old delegation could expire or it
4325 * could be recovered by the client via an Open with
4326 * CLAIM_DELEGATE_PREV.
4327 * Release the nfsv4root_lock, if held.
4328 */
4329 if (*haslockp) {
4330 *haslockp = 0;
4331 NFSLOCKV4ROOTMUTEX();
4332 nfsv4_unlock(&nfsv4rootfs_lock, 1);
4333 NFSUNLOCKV4ROOTMUTEX();
4334 }
4335 return (NFSERR_DELAY);
4336 }
4337
4338 /*
4339 * It's a current delegation, so:
4340 * - check to see if the delegation has expired
4341 * - if so, get the v4root lock and then expire it
4342 */
4343 if (!(stp->ls_flags & NFSLCK_DELEGRECALL)) {
4344 /*
4345 * - do a recall callback, since not yet done
4346 * For now, never allow truncate to be set. To use
4347 * truncate safely, it must be guaranteed that the
4348 * Remove, Rename or Setattr with size of 0 will
4349 * succeed and that would require major changes to
4350 * the VFS/Vnode OPs.
4351 * Set the expiry time large enough so that it won't expire
4352 * until after the callback, then set it correctly, once
4353 * the callback is done. (The delegation will now time
4354 * out whether or not the Recall worked ok. The timeout
4355 * will be extended when ops are done on the delegation
4356 * stateid, up to the timelimit.)
4357 */
4358 stp->ls_delegtime = NFSD_MONOSEC + (2 * nfsrv_lease) +
4359 NFSRV_LEASEDELTA;
4360 stp->ls_delegtimelimit = NFSD_MONOSEC + (6 * nfsrv_lease) +
4361 NFSRV_LEASEDELTA;
4362 stp->ls_flags |= NFSLCK_DELEGRECALL;
4363
4364 /*
4365		 * Loop NFSV4_CBRETRYCNT times while the CBRecall replies
4366		 * NFSERR_BADSTATEID or NFSERR_BADHANDLE. This is done
4367		 * in order to try and avoid a race that could happen
4368		 * when a CBRecall request passes the Open reply with
4369		 * the delegation in it while transiting the network.
4370 * Since nfsrv_docallback will sleep, don't use stp after
4371 * the call.
4372 */
4373 NFSBCOPY((caddr_t)&stp->ls_stateid, (caddr_t)&tstateid,
4374 sizeof (tstateid));
4375 NFSBCOPY((caddr_t)&stp->ls_lfp->lf_fh, (caddr_t)&tfh,
4376 sizeof (tfh));
4377 NFSUNLOCKSTATE();
4378 if (*haslockp) {
4379 *haslockp = 0;
4380 NFSLOCKV4ROOTMUTEX();
4381 nfsv4_unlock(&nfsv4rootfs_lock, 1);
4382 NFSUNLOCKV4ROOTMUTEX();
4383 }
4384 retrycnt = 0;
4385 do {
4386 error = nfsrv_docallback(clp, NFSV4OP_CBRECALL,
4387 &tstateid, 0, &tfh, NULL, NULL, p);
4388 retrycnt++;
4389 } while ((error == NFSERR_BADSTATEID ||
4390 error == NFSERR_BADHANDLE) && retrycnt < NFSV4_CBRETRYCNT);
4391 return (NFSERR_DELAY);
4392 }
4393
4394 if (clp->lc_expiry >= NFSD_MONOSEC &&
4395 stp->ls_delegtime >= NFSD_MONOSEC) {
4396 NFSUNLOCKSTATE();
4397 /*
4398 * A recall has been done, but it has not yet expired.
4399 * So, RETURN_DELAY.
4400 */
4401 if (*haslockp) {
4402 *haslockp = 0;
4403 NFSLOCKV4ROOTMUTEX();
4404 nfsv4_unlock(&nfsv4rootfs_lock, 1);
4405 NFSUNLOCKV4ROOTMUTEX();
4406 }
4407 return (NFSERR_DELAY);
4408 }
4409
4410 /*
4411 * If we don't yet have the lock, just get it and then return,
4412 * since we need that before deleting expired state, such as
4413 * this delegation.
4414 * When getting the lock, unlock the vnode, so other nfsds that
4415	 * are in progress won't get stuck waiting for the vnode lock.
4416 */
4417 if (*haslockp == 0) {
4418 NFSUNLOCKSTATE();
4419 lktype = NFSVOPISLOCKED(vp);
4420 NFSVOPUNLOCK(vp, 0);
4421 NFSLOCKV4ROOTMUTEX();
4422 nfsv4_relref(&nfsv4rootfs_lock);
4423 do {
4424 gotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL,
4425 NFSV4ROOTLOCKMUTEXPTR, NULL);
4426 } while (!gotlock);
4427 NFSUNLOCKV4ROOTMUTEX();
4428 *haslockp = 1;
4429 NFSVOPLOCK(vp, lktype | LK_RETRY);
4430 if ((vp->v_iflag & VI_DOOMED) != 0) {
4431 *haslockp = 0;
4432 NFSLOCKV4ROOTMUTEX();
4433 nfsv4_unlock(&nfsv4rootfs_lock, 1);
4434 NFSUNLOCKV4ROOTMUTEX();
4435 return (NFSERR_PERM);
4436 }
4437 return (-1);
4438 }
4439
4440 NFSUNLOCKSTATE();
4441 /*
4442 * Ok, we can delete the expired delegation.
4443 * First, write the Revoke record to stable storage and then
4444 * clear out the conflict.
4445 * Since all other nfsd threads are now blocked, we can safely
4446 * sleep without the state changing.
4447 */
4448 nfsrv_writestable(clp->lc_id, clp->lc_idlen, NFSNST_REVOKE, p);
4449 nfsrv_backupstable();
4450 if (clp->lc_expiry < NFSD_MONOSEC) {
4451 nfsrv_cleanclient(clp, p);
4452 nfsrv_freedeleglist(&clp->lc_deleg);
4453 nfsrv_freedeleglist(&clp->lc_olddeleg);
4454 LIST_REMOVE(clp, lc_hash);
4455 zapped_clp = 1;
4456 } else {
4457 nfsrv_freedeleg(stp);
4458 zapped_clp = 0;
4459 }
4460 if (zapped_clp)
4461 nfsrv_zapclient(clp, p);
4462 return (-1);
4463}
4464
4465/*
4466 * Check for a remove allowed, if remove is set to 1 and get rid of
4467 * delegations.
4468 */
4469APPLESTATIC int
4470nfsrv_checkremove(vnode_t vp, int remove, NFSPROC_T *p)
4471{
4472 struct nfsstate *stp;
4473 struct nfslockfile *lfp;
4474 int error, haslock = 0;
4475 fhandle_t nfh;
4476
4477 /*
4478 * First, get the lock file structure.
4479 * (A return of -1 means no associated state, so remove ok.)
4480 */
4481 error = nfsrv_getlockfh(vp, NFSLCK_CHECK, NULL, &nfh, p);
4482tryagain:
4483 NFSLOCKSTATE();
4484 if (!error)
4485 error = nfsrv_getlockfile(NFSLCK_CHECK, NULL, &lfp, &nfh, 0);
4486 if (error) {
4487 NFSUNLOCKSTATE();
4488 if (haslock) {
4489 NFSLOCKV4ROOTMUTEX();
4490 nfsv4_unlock(&nfsv4rootfs_lock, 1);
4491 NFSUNLOCKV4ROOTMUTEX();
4492 }
4493 if (error == -1)
4494 return (0);
4495 return (error);
4496 }
4497
4498 /*
4499 * Now, we must Recall any delegations.
4500 */
4501 error = nfsrv_cleandeleg(vp, lfp, NULL, &haslock, p);
4502 if (error) {
4503 /*
4504 * nfsrv_cleandeleg() unlocks state for non-zero
4505 * return.
4506 */
4507 if (error == -1)
4508 goto tryagain;
4509 if (haslock) {
4510 NFSLOCKV4ROOTMUTEX();
4511 nfsv4_unlock(&nfsv4rootfs_lock, 1);
4512 NFSUNLOCKV4ROOTMUTEX();
4513 }
4514 return (error);
4515 }
4516
4517 /*
4518 * Now, look for a conflicting open share.
4519 */
4520 if (remove) {
4521 LIST_FOREACH(stp, &lfp->lf_open, ls_file) {
4522 if (stp->ls_flags & NFSLCK_WRITEDENY) {
4523 error = NFSERR_FILEOPEN;
4524 break;
4525 }
4526 }
4527 }
4528
4529 NFSUNLOCKSTATE();
4530 if (haslock) {
4531 NFSLOCKV4ROOTMUTEX();
4532 nfsv4_unlock(&nfsv4rootfs_lock, 1);
4533 NFSUNLOCKV4ROOTMUTEX();
4534 }
4535 return (error);
4536}
4537
4538/*
4539 * Clear out all delegations for the file referred to by lfp.
4540 * May return NFSERR_DELAY, if there will be a delay waiting for
4541 * delegations to expire.
4542 * Returns -1 to indicate it slept while recalling a delegation.
4543 * This function has the side effect of deleting the nfslockfile structure,
4544 * if it no longer has associated state and didn't have to sleep.
4545 * Unlocks State before a non-zero value is returned.
4546 */
4547static int
4548nfsrv_cleandeleg(vnode_t vp, struct nfslockfile *lfp,
4549 struct nfsclient *clp, int *haslockp, NFSPROC_T *p)
4550{
4551 struct nfsstate *stp, *nstp;
4552 int ret;
4553
4554 stp = LIST_FIRST(&lfp->lf_deleg);
4555 while (stp != LIST_END(&lfp->lf_deleg)) {
4556 nstp = LIST_NEXT(stp, ls_file);
4557 if (stp->ls_clp != clp) {
4558 ret = nfsrv_delegconflict(stp, haslockp, p, vp);
4559 if (ret) {
4560 /*
4561 * nfsrv_delegconflict() unlocks state
4562 * when it returns non-zero.
4563 */
4564 return (ret);
4565 }
4566 }
4567 stp = nstp;
4568 }
4569 return (0);
4570}
4571
4572/*
4573 * There are certain operations that, when being done outside of NFSv4,
4574 * require that any NFSv4 delegation for the file be recalled.
4575 * This function is to be called for those cases:
4576 * VOP_RENAME() - When a delegation is being recalled for any reason,
4577 * the client may have to do Opens against the server, using the file's
4578 * final component name. If the file has been renamed on the server,
4579 * that component name will be incorrect and the Open will fail.
4580 * VOP_REMOVE() - Theoretically, a client could Open a file after it has
4581 * been removed on the server, if there is a delegation issued to
4582 * that client for the file. I say "theoretically" since clients
4583 * normally do an Access Op before the Open and that Access Op will
4584 * fail with ESTALE. Note that NFSv2 and 3 don't even do Opens, so
4585 * they will detect the file's removal in the same manner. (There is
4586 * one case where RFC3530 allows a client to do an Open without first
4587 * doing an Access Op, which is passage of a check against the ACE
4588 * returned with a Write delegation, but current practice is to ignore
4589 * the ACE and always do an Access Op.)
4590 * Since the functions can only be called with an unlocked vnode, this
4591 * can't be done at this time.
4592 * VOP_ADVLOCK() - When a client holds a delegation, it can issue byte range
4593 * locks locally in the client, which are not visible to the server. To
4594 * deal with this, issuing of delegations for a vnode must be disabled
4595 * and all delegations for the vnode recalled. This is done via the
4596 * second function, using the VV_DISABLEDELEG vflag on the vnode.
4597 */
4598APPLESTATIC void
4599nfsd_recalldelegation(vnode_t vp, NFSPROC_T *p)
4600{
4601 struct timespec mytime;
4602 int32_t starttime;
4603 int error;
4604
4605 /*
4606 * First, check to see if the server is currently running and it has
4607 * been called for a regular file when issuing delegations.
4608 */
4609 if (newnfs_numnfsd == 0 || vp->v_type != VREG ||
4610 nfsrv_issuedelegs == 0)
4611 return;
4612
4613 KASSERT((NFSVOPISLOCKED(vp) != LK_EXCLUSIVE), ("vp %p is locked", vp));
4614 /*
4615 * First, get a reference on the nfsv4rootfs_lock so that an
4616 * exclusive lock cannot be acquired by another thread.
4617 */
4618 NFSLOCKV4ROOTMUTEX();
4619 nfsv4_getref(&nfsv4rootfs_lock, NULL, NFSV4ROOTLOCKMUTEXPTR, NULL);
4620 NFSUNLOCKV4ROOTMUTEX();
4621
4622 /*
4623 * Now, call nfsrv_checkremove() in a loop while it returns
4624 * NFSERR_DELAY. Return upon any other error or when timed out.
4625 */
4626 NFSGETNANOTIME(&mytime);
4627 starttime = (u_int32_t)mytime.tv_sec;
4628 do {
4629 if (NFSVOPLOCK(vp, LK_EXCLUSIVE) == 0) {
4630 error = nfsrv_checkremove(vp, 0, p);
4631 NFSVOPUNLOCK(vp, 0);
4632 } else
4633 error = EPERM;
4634 if (error == NFSERR_DELAY) {
4635 NFSGETNANOTIME(&mytime);
4636 if (((u_int32_t)mytime.tv_sec - starttime) >
4637 NFS_REMOVETIMEO &&
4638 ((u_int32_t)mytime.tv_sec - starttime) <
4639 100000)
4640 break;
4641 /* Sleep for a short period of time */
4642 (void) nfs_catnap(PZERO, 0, "nfsremove");
4643 }
4644 } while (error == NFSERR_DELAY);
4645 NFSLOCKV4ROOTMUTEX();
4646 nfsv4_relref(&nfsv4rootfs_lock);
4647 NFSUNLOCKV4ROOTMUTEX();
4648}
4649
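/*
 * Illustrative sketch (not part of the nfsd sources): the retry
 * pattern used by nfsd_recalldelegation() above, in isolation.  An
 * operation that reports "try again later" is retried with a short
 * nap in between until it either succeeds, fails for another reason,
 * or a timeout expires.  ERR_DELAY, do_op() and the one-second nap
 * are stand-ins for NFSERR_DELAY, nfsrv_checkremove() and
 * nfs_catnap().  Compiled out with #if 0.
 */
#if 0
#include <time.h>
#include <unistd.h>

#define	ERR_DELAY	10008		/* stand-in for NFSERR_DELAY */

static int
retry_until(int (*do_op)(void), int timeout_sec)
{
	time_t start = time(NULL);
	int error;

	do {
		error = do_op();
		if (error != ERR_DELAY)
			break;
		if (time(NULL) - start > timeout_sec)
			break;
		sleep(1);		/* short nap before retrying */
	} while (error == ERR_DELAY);
	return (error);
}
#endif
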
4650APPLESTATIC void
4651nfsd_disabledelegation(vnode_t vp, NFSPROC_T *p)
4652{
4653
4654#ifdef VV_DISABLEDELEG
4655 /*
4656 * First, flag issuance of delegations disabled.
4657 */
4658 atomic_set_long(&vp->v_vflag, VV_DISABLEDELEG);
4659#endif
4660
4661 /*
4662 * Then call nfsd_recalldelegation() to get rid of all extant
4663 * delegations.
4664 */
4665 nfsd_recalldelegation(vp, p);
4666}
4667
4668/*
4669 * Check for conflicting locks, etc. and then get rid of delegations.
4670 * (At one point I thought that I should get rid of delegations for any
4671 * Setattr, since it could potentially disallow the I/O op (read or write)
4672 * allowed by the delegation. However, Setattr Ops that aren't changing
4673 * the size get a stateid of all 0s, so you can't tell if it is a delegation
4674 * for the same client or a different one, so I decided to only get rid
4675 * of delegations for other clients when the size is being changed.)
4676 * In general, a Setattr can disable NFS I/O Ops that are outstanding, such
4677 * as Write backs, even if there is no delegation, so it really isn't any
4678 * different?
4679 */
4680APPLESTATIC int
4681nfsrv_checksetattr(vnode_t vp, struct nfsrv_descript *nd,
4682 nfsv4stateid_t *stateidp, struct nfsvattr *nvap, nfsattrbit_t *attrbitp,
4683 struct nfsexstuff *exp, NFSPROC_T *p)
4684{
4685 struct nfsstate st, *stp = &st;
4686 struct nfslock lo, *lop = &lo;
4687 int error = 0;
4688 nfsquad_t clientid;
4689
4690 if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_SIZE)) {
4691 stp->ls_flags = (NFSLCK_CHECK | NFSLCK_WRITEACCESS);
4692 lop->lo_first = nvap->na_size;
4693 } else {
4694 stp->ls_flags = 0;
4695 lop->lo_first = 0;
4696 }
4697 if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_OWNER) ||
4698 NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_OWNERGROUP) ||
4699 NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_MODE) ||
4700 NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_ACL))
4701 stp->ls_flags |= NFSLCK_SETATTR;
4702 if (stp->ls_flags == 0)
4703 return (0);
4704 lop->lo_end = NFS64BITSSET;
4705 lop->lo_flags = NFSLCK_WRITE;
4706 stp->ls_ownerlen = 0;
4707 stp->ls_op = NULL;
4708 stp->ls_uid = nd->nd_cred->cr_uid;
4709 stp->ls_stateid.seqid = stateidp->seqid;
4710 clientid.lval[0] = stp->ls_stateid.other[0] = stateidp->other[0];
4711 clientid.lval[1] = stp->ls_stateid.other[1] = stateidp->other[1];
4712 stp->ls_stateid.other[2] = stateidp->other[2];
4713 error = nfsrv_lockctrl(vp, &stp, &lop, NULL, clientid,
4714 stateidp, exp, nd, p);
4715 return (error);
4716}
4717
4718/*
4719 * Check for a write delegation and do a CBGETATTR if there is one, updating
4720 * the attributes, as required.
4721 * Should I return an error if I can't get the attributes? (For now, I'll
4722 * just return ok.)
4723 */
4724APPLESTATIC int
4725nfsrv_checkgetattr(struct nfsrv_descript *nd, vnode_t vp,
4726 struct nfsvattr *nvap, nfsattrbit_t *attrbitp, struct ucred *cred,
4727 NFSPROC_T *p)
4728{
4729 struct nfsstate *stp;
4730 struct nfslockfile *lfp;
4731 struct nfsclient *clp;
4732 struct nfsvattr nva;
4733 fhandle_t nfh;
4734 int error;
4735 nfsattrbit_t cbbits;
4736 u_quad_t delegfilerev;
4737
4738 NFSCBGETATTR_ATTRBIT(attrbitp, &cbbits);
4739 if (!NFSNONZERO_ATTRBIT(&cbbits))
4740 return (0);
4741
4742 /*
4743 * Get the lock file structure.
4744 * (A return of -1 means no associated state, so return ok.)
4745 */
4746 error = nfsrv_getlockfh(vp, NFSLCK_CHECK, NULL, &nfh, p);
4747 NFSLOCKSTATE();
4748 if (!error)
4749 error = nfsrv_getlockfile(NFSLCK_CHECK, NULL, &lfp, &nfh, 0);
4750 if (error) {
4751 NFSUNLOCKSTATE();
4752 if (error == -1)
4753 return (0);
4754 return (error);
4755 }
4756
4757 /*
4758 * Now, look for a write delegation.
4759 */
4760 LIST_FOREACH(stp, &lfp->lf_deleg, ls_file) {
4761 if (stp->ls_flags & NFSLCK_DELEGWRITE)
4762 break;
4763 }
4764 if (stp == LIST_END(&lfp->lf_deleg)) {
4765 NFSUNLOCKSTATE();
4766 return (0);
4767 }
4768 clp = stp->ls_clp;
4769 delegfilerev = stp->ls_filerev;
4770
4771 /*
4772 * If the Write delegation was issued as a part of this Compound RPC
4773 * or if we have an Implied Clientid (used in a previous Op in this
4774 * compound) and it is the client the delegation was issued to,
4775 * just return ok.
4776 * I also assume that it is from the same client iff the network
4777 * host IP address is the same as the callback address. (Not
4778 * exactly correct by the RFC, but avoids a lot of Getattr
4779 * callbacks.)
4780 */
4781 if (nd->nd_compref == stp->ls_compref ||
4782 ((nd->nd_flag & ND_IMPLIEDCLID) &&
4783 clp->lc_clientid.qval == nd->nd_clientid.qval) ||
4784 nfsaddr2_match(clp->lc_req.nr_nam, nd->nd_nam)) {
4785 NFSUNLOCKSTATE();
4786 return (0);
4787 }
4788
4789 /*
4790 * We are now done with the delegation state structure,
4791 * so the statelock can be released and we can now tsleep().
4792 */
4793
4794 /*
4795 * Now, we must do the CB Getattr callback, to see if Change or Size
4796 * has changed.
4797 */
4798 if (clp->lc_expiry >= NFSD_MONOSEC) {
4799 NFSUNLOCKSTATE();
4800 NFSVNO_ATTRINIT(&nva);
4801 nva.na_filerev = NFS64BITSSET;
4802 error = nfsrv_docallback(clp, NFSV4OP_CBGETATTR, NULL,
4803 0, &nfh, &nva, &cbbits, p);
4804 if (!error) {
4805 if ((nva.na_filerev != NFS64BITSSET &&
4806 nva.na_filerev > delegfilerev) ||
4807 (NFSVNO_ISSETSIZE(&nva) &&
4808 nva.na_size != nvap->na_size)) {
4809 nfsvno_updfilerev(vp, nvap, cred, p);
4810 if (NFSVNO_ISSETSIZE(&nva))
4811 nvap->na_size = nva.na_size;
4812 }
4813 }
4814 } else {
4815 NFSUNLOCKSTATE();
4816 }
4817 return (0);
4818}
4819
4820/*
4821 * This function looks for openowners that haven't had any opens for
4822 * a while and throws them away. Called by an nfsd when NFSNSF_NOOPENS
4823 * is set.
4824 */
4825APPLESTATIC void
4826nfsrv_throwawayopens(NFSPROC_T *p)
4827{
4828 struct nfsclient *clp, *nclp;
4829 struct nfsstate *stp, *nstp;
4830 int i;
4831
4832 NFSLOCKSTATE();
4833 nfsrv_stablefirst.nsf_flags &= ~NFSNSF_NOOPENS;
4834 /*
4835 * For each client...
4836 */
4837 for (i = 0; i < NFSCLIENTHASHSIZE; i++) {
4838 LIST_FOREACH_SAFE(clp, &nfsclienthash[i], lc_hash, nclp) {
4839 LIST_FOREACH_SAFE(stp, &clp->lc_open, ls_list, nstp) {
4840 if (LIST_EMPTY(&stp->ls_open) &&
4841 (stp->ls_noopens > NFSNOOPEN ||
4842 (nfsrv_openpluslock * 2) >
4843 NFSRV_V4STATELIMIT))
4844 nfsrv_freeopenowner(stp, 0, p);
4845 }
4846 }
4847 }
4848 NFSUNLOCKSTATE();
4849}
4850
4851/*
4852 * This function checks to see if the credentials are the same.
4853 * Returns 1 for not same, 0 otherwise.
4854 */
4855static int
4856nfsrv_notsamecredname(struct nfsrv_descript *nd, struct nfsclient *clp)
4857{
4858
4859 if (nd->nd_flag & ND_GSS) {
4860 if (!(clp->lc_flags & LCL_GSS))
4861 return (1);
4862 if (clp->lc_flags & LCL_NAME) {
4863 if (nd->nd_princlen != clp->lc_namelen ||
4864 NFSBCMP(nd->nd_principal, clp->lc_name,
4865 clp->lc_namelen))
4866 return (1);
4867 else
4868 return (0);
4869 }
4870 if (nd->nd_cred->cr_uid == clp->lc_uid)
4871 return (0);
4872 else
4873 return (1);
4874 } else if (clp->lc_flags & LCL_GSS)
4875 return (1);
4876 /*
4877 * For AUTH_SYS, allow the same uid or root. (This is underspecified
4878 * in RFC3530, which talks about principals, but doesn't say anything
4879 * about uids for AUTH_SYS.)
4880 */
4881 if (nd->nd_cred->cr_uid == clp->lc_uid || nd->nd_cred->cr_uid == 0)
4882 return (0);
4883 else
4884 return (1);
4885}
4886
4887/*
4888 * Calculate the lease expiry time.
4889 */
4890static time_t
4891nfsrv_leaseexpiry(void)
4892{
4893 struct timeval curtime;
4894
4895 NFSGETTIME(&curtime);
4896 if (nfsrv_stablefirst.nsf_eograce > NFSD_MONOSEC)
4897 return (NFSD_MONOSEC + 2 * (nfsrv_lease + NFSRV_LEASEDELTA));
4898 return (NFSD_MONOSEC + nfsrv_lease + NFSRV_LEASEDELTA);
4899}
4900
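/*
 * Illustrative sketch (not part of the nfsd sources): the lease
 * expiry computed above is simply now + lease + a safety delta, and
 * the window is doubled while the server is still in its grace period
 * so that reclaimed state cannot expire before grace ends.  "lease"
 * and "delta" are stand-ins for nfsrv_lease and NFSRV_LEASEDELTA.
 * Compiled out with #if 0.
 */
#if 0
#include <time.h>

static time_t
lease_expiry(time_t now, int in_grace, int lease, int delta)
{

	if (in_grace)
		return (now + 2 * (lease + delta));
	return (now + lease + delta);
}
#endif
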
4901/*
4902 * Delay the delegation timeout as far as ls_delegtimelimit, as required.
4903 */
4904static void
4905nfsrv_delaydelegtimeout(struct nfsstate *stp)
4906{
4907
4908 if ((stp->ls_flags & NFSLCK_DELEGRECALL) == 0)
4909 return;
4910
4911 if ((stp->ls_delegtime + 15) > NFSD_MONOSEC &&
4912 stp->ls_delegtime < stp->ls_delegtimelimit) {
4913 stp->ls_delegtime += nfsrv_lease;
4914 if (stp->ls_delegtime > stp->ls_delegtimelimit)
4915 stp->ls_delegtime = stp->ls_delegtimelimit;
4916 }
4917}
4918
4919/*
4920 * This function checks to see if there is any other state associated
4921 * with the openowner for this Open.
4922 * It returns 1 if there is no other state, 0 otherwise.
4923 */
4924static int
4925nfsrv_nootherstate(struct nfsstate *stp)
4926{
4927 struct nfsstate *tstp;
4928
4929 LIST_FOREACH(tstp, &stp->ls_openowner->ls_open, ls_list) {
4930 if (tstp != stp || !LIST_EMPTY(&tstp->ls_lock))
4931 return (0);
4932 }
4933 return (1);
4934}
4935
4936/*
4937 * Create a list of lock deltas (changes to local byte range locking
4938 * that can be rolled back using the list) and apply the changes via
4939 * nfsvno_advlock(). Optionally, lock the list. It is expected that either
4940 * the rollback or update function will be called after this.
4941 * It returns an error (and rolls back, as required), if any nfsvno_advlock()
4942 * call fails. If it returns an error, it will unlock the list.
4943 */
4944static int
4945nfsrv_locallock(vnode_t vp, struct nfslockfile *lfp, int flags,
4946 uint64_t first, uint64_t end, struct nfslockconflict *cfp, NFSPROC_T *p)
4947{
4948 struct nfslock *lop, *nlop;
4949 int error = 0;
4950
4951 /* Loop through the list of locks. */
4952 lop = LIST_FIRST(&lfp->lf_locallock);
4953 while (first < end && lop != NULL) {
4954 nlop = LIST_NEXT(lop, lo_lckowner);
4955 if (first >= lop->lo_end) {
4956 /* not there yet */
4957 lop = nlop;
4958 } else if (first < lop->lo_first) {
4959 /* new one starts before entry in list */
4960 if (end <= lop->lo_first) {
4961 /* no overlap between old and new */
4962 error = nfsrv_dolocal(vp, lfp, flags,
4963 NFSLCK_UNLOCK, first, end, cfp, p);
4964 if (error != 0)
4965 break;
4966 first = end;
4967 } else {
4968 /* handle fragment overlapped with new one */
4969 error = nfsrv_dolocal(vp, lfp, flags,
4970 NFSLCK_UNLOCK, first, lop->lo_first, cfp,
4971 p);
4972 if (error != 0)
4973 break;
4974 first = lop->lo_first;
4975 }
4976 } else {
4977 /* new one overlaps this entry in list */
4978 if (end <= lop->lo_end) {
4979 /* overlaps all of new one */
4980 error = nfsrv_dolocal(vp, lfp, flags,
4981 lop->lo_flags, first, end, cfp, p);
4982 if (error != 0)
4983 break;
4984 first = end;
4985 } else {
4986 /* handle fragment overlapped with new one */
4987 error = nfsrv_dolocal(vp, lfp, flags,
4988 lop->lo_flags, first, lop->lo_end, cfp, p);
4989 if (error != 0)
4990 break;
4991 first = lop->lo_end;
4992 lop = nlop;
4993 }
4994 }
4995 }
4996 if (first < end && error == 0)
4997 /* handle fragment past end of list */
4998 error = nfsrv_dolocal(vp, lfp, flags, NFSLCK_UNLOCK, first,
4999 end, cfp, p);
5000 return (error);
5001}
5002
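/*
 * Illustrative sketch (not part of the nfsd sources): the loop above
 * is an interval sweep.  The new range [first, end) is walked against
 * the ordered list of existing local locks; each gap between entries
 * is handled as one piece and each overlap with an entry as another,
 * so the whole range is covered piecewise.  The hypothetical sweep()
 * below does the same over a plain array, with piece() standing in
 * for nfsrv_dolocal().  Compiled out with #if 0.
 */
#if 0
#include <stdint.h>

struct range {
	uint64_t	first;
	uint64_t	end;		/* exclusive */
};

static void
sweep(uint64_t first, uint64_t end, const struct range *r, int n,
    void (*piece)(uint64_t, uint64_t, int))
{
	int i;

	for (i = 0; i < n && first < end; i++) {
		if (first >= r[i].end)
			continue;		/* not there yet */
		if (first < r[i].first) {
			/* gap before this entry */
			piece(first, end < r[i].first ? end : r[i].first, 0);
			first = r[i].first;
			if (first >= end)
				break;
		}
		/* overlap with this entry */
		piece(first, end < r[i].end ? end : r[i].end, 1);
		first = r[i].end;
	}
	if (first < end)
		piece(first, end, 0);		/* tail past the list */
}
#endif
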
5003/*
5004 * Local lock unlock. Unlock all byte ranges that are no longer locked
5005 * by NFSv4. To do this, unlock any subranges of first-->end that
5006 * do not overlap with the byte ranges of any lock in the lfp->lf_lock
5007 * list. This list has all locks for the file held by other
5008 * <clientid, lockowner> tuples. The list is ordered by increasing
5009 * lo_first value, but may have entries that overlap each other, for
5010 * the case of read locks.
5011 */
5012static void
5013nfsrv_localunlock(vnode_t vp, struct nfslockfile *lfp, uint64_t init_first,
5014 uint64_t init_end, NFSPROC_T *p)
5015{
5016 struct nfslock *lop;
5017 uint64_t first, end, prevfirst;
5018
5019 first = init_first;
5020 end = init_end;
5021 while (first < init_end) {
5022 /* Loop through all nfs locks, adjusting first and end */
5023 prevfirst = 0;
5024 LIST_FOREACH(lop, &lfp->lf_lock, lo_lckfile) {
5025 KASSERT(prevfirst <= lop->lo_first,
5026 ("nfsv4 locks out of order"));
5027 KASSERT(lop->lo_first < lop->lo_end,
5028 ("nfsv4 bogus lock"));
5029 prevfirst = lop->lo_first;
5030 if (first >= lop->lo_first &&
5031 first < lop->lo_end)
5032 /*
5033 * Overlaps with initial part, so trim
5034 * off that initial part by moving first past
5035 * it.
5036 */
5037 first = lop->lo_end;
5038 else if (end > lop->lo_first &&
5039 lop->lo_first > first) {
5040 /*
5041 * This lock defines the end of the
5042 * segment to unlock, so set end to the
5043 * start of it and break out of the loop.
5044 */
5045 end = lop->lo_first;
5046 break;
5047 }
5048 if (first >= end)
5049 /*
5050 * There is no segment left to do, so
5051 * break out of this loop and then exit
5052 * the outer while() since first will be set
5053 * to end, which must equal init_end here.
5054 */
5055 break;
5056 }
5057 if (first < end) {
5058 /* Unlock this segment */
5059 (void) nfsrv_dolocal(vp, lfp, NFSLCK_UNLOCK,
5060 NFSLCK_READ, first, end, NULL, p);
5061 nfsrv_locallock_commit(lfp, NFSLCK_UNLOCK,
5062 first, end);
5063 }
5064 /*
5065 * Now move past this segment and look for any further
5066 * segment in the range, if there is one.
5067 */
5068 first = end;
5069 end = init_end;
5070 }
5071}
5072
5073/*
5074 * Do the local lock operation and update the rollback list, as required.
5075 * Perform the rollback and return the error if nfsvno_advlock() fails.
5076 */
5077static int
5078nfsrv_dolocal(vnode_t vp, struct nfslockfile *lfp, int flags, int oldflags,
5079 uint64_t first, uint64_t end, struct nfslockconflict *cfp, NFSPROC_T *p)
5080{
5081 struct nfsrollback *rlp;
5082 int error, ltype, oldltype;
5083
5084 if (flags & NFSLCK_WRITE)
5085 ltype = F_WRLCK;
5086 else if (flags & NFSLCK_READ)
5087 ltype = F_RDLCK;
5088 else
5089 ltype = F_UNLCK;
5090 if (oldflags & NFSLCK_WRITE)
5091 oldltype = F_WRLCK;
5092 else if (oldflags & NFSLCK_READ)
5093 oldltype = F_RDLCK;
5094 else
5095 oldltype = F_UNLCK;
5096 if (ltype == oldltype || (oldltype == F_WRLCK && ltype == F_RDLCK))
5097 /* nothing to do */
5098 return (0);
5099 error = nfsvno_advlock(vp, ltype, first, end, p);
5100 if (error != 0) {
5101 if (cfp != NULL) {
5102 cfp->cl_clientid.lval[0] = 0;
5103 cfp->cl_clientid.lval[1] = 0;
5104 cfp->cl_first = 0;
5105 cfp->cl_end = NFS64BITSSET;
5106 cfp->cl_flags = NFSLCK_WRITE;
5107 cfp->cl_ownerlen = 5;
5108 NFSBCOPY("LOCAL", cfp->cl_owner, 5);
5109 }
5110 nfsrv_locallock_rollback(vp, lfp, p);
5111 } else if (ltype != F_UNLCK) {
5112 rlp = malloc(sizeof (struct nfsrollback), M_NFSDROLLBACK,
5113 M_WAITOK);
5114 rlp->rlck_first = first;
5115 rlp->rlck_end = end;
5116 rlp->rlck_type = oldltype;
5117 LIST_INSERT_HEAD(&lfp->lf_rollback, rlp, rlck_list);
5118 }
5119 return (error);
5120}
5121
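/*
 * Illustrative sketch (not part of the nfsd sources): the flag
 * mapping used above, in isolation.  A write request maps to F_WRLCK,
 * a read request to F_RDLCK and anything else to F_UNLCK; the XLCK_*
 * bits are hypothetical stand-ins for the NFSLCK_* flags.  Compiled
 * out with #if 0.
 */
#if 0
#include <fcntl.h>

#define	XLCK_READ	0x01
#define	XLCK_WRITE	0x02

static short
lockflags_to_ltype(int flags)
{

	if (flags & XLCK_WRITE)
		return (F_WRLCK);
	if (flags & XLCK_READ)
		return (F_RDLCK);
	return (F_UNLCK);
}
#endif
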
5122/*
5123 * Roll back local lock changes and free up the rollback list.
5124 */
5125static void
5126nfsrv_locallock_rollback(vnode_t vp, struct nfslockfile *lfp, NFSPROC_T *p)
5127{
5128 struct nfsrollback *rlp, *nrlp;
5129
5130 LIST_FOREACH_SAFE(rlp, &lfp->lf_rollback, rlck_list, nrlp) {
5131 (void) nfsvno_advlock(vp, rlp->rlck_type, rlp->rlck_first,
5132 rlp->rlck_end, p);
5133 free(rlp, M_NFSDROLLBACK);
5134 }
5135 LIST_INIT(&lfp->lf_rollback);
5136}
5137
5138/*
5139 * Update local lock list and delete rollback list (ie now committed to the
5140 * local locks). Most of the work is done by the internal function.
5141 */
5142static void
5143nfsrv_locallock_commit(struct nfslockfile *lfp, int flags, uint64_t first,
5144 uint64_t end)
5145{
5146 struct nfsrollback *rlp, *nrlp;
5147 struct nfslock *new_lop, *other_lop;
5148
5149 new_lop = malloc(sizeof (struct nfslock), M_NFSDLOCK, M_WAITOK);
5150 if (flags & (NFSLCK_READ | NFSLCK_WRITE))
5151 other_lop = malloc(sizeof (struct nfslock), M_NFSDLOCK,
5152 M_WAITOK);
5153 else
5154 other_lop = NULL;
5155 new_lop->lo_flags = flags;
5156 new_lop->lo_first = first;
5157 new_lop->lo_end = end;
5158 nfsrv_updatelock(NULL, &new_lop, &other_lop, lfp);
5159 if (new_lop != NULL)
5160 free(new_lop, M_NFSDLOCK);
5161 if (other_lop != NULL)
5162 free(other_lop, M_NFSDLOCK);
5163
5164 /* and get rid of the rollback list */
5165 LIST_FOREACH_SAFE(rlp, &lfp->lf_rollback, rlck_list, nrlp)
5166 free(rlp, M_NFSDROLLBACK);
5167 LIST_INIT(&lfp->lf_rollback);
5168}
5169
5170/*
5171 * Lock the struct nfslockfile for local lock updating.
5172 */
5173static void
5174nfsrv_locklf(struct nfslockfile *lfp)
5175{
5176 int gotlock;
5177
5178 /* lf_usecount ensures *lfp won't be free'd */
5179 lfp->lf_usecount++;
5180 do {
5181 gotlock = nfsv4_lock(&lfp->lf_locallock_lck, 1, NULL,
5182 NFSSTATEMUTEXPTR, NULL);
5183 } while (gotlock == 0);
5184 lfp->lf_usecount--;
5185}
5186
5187/*
5188 * Unlock the struct nfslockfile after local lock updating.
5189 */
5190static void
5191nfsrv_unlocklf(struct nfslockfile *lfp)
5192{
5193
5194 nfsv4_unlock(&lfp->lf_locallock_lck, 0);
5195}
5196
5197/*
5198 * Clear out all state for the NFSv4 server.
5199 * Must be called by a thread that can sleep when no nfsds are running.
5200 */
5201void
5202nfsrv_throwawayallstate(NFSPROC_T *p)
5203{
5204 struct nfsclient *clp, *nclp;
5205 struct nfslockfile *lfp, *nlfp;
5206 int i;
5207
5208 /*
5209 * For each client, clean out the state and then free the structure.
5210 */
5211 for (i = 0; i < NFSCLIENTHASHSIZE; i++) {
5212 LIST_FOREACH_SAFE(clp, &nfsclienthash[i], lc_hash, nclp) {
5213 nfsrv_cleanclient(clp, p);
5214 nfsrv_freedeleglist(&clp->lc_deleg);
5215 nfsrv_freedeleglist(&clp->lc_olddeleg);
5216 free(clp, M_NFSDCLIENT);
5217 }
5218 }
5219
5220 /*
5221 * Also, free up any remaining lock file structures.
5222 */
5223 for (i = 0; i < NFSLOCKHASHSIZE; i++) {
5224 LIST_FOREACH_SAFE(lfp, &nfslockhash[i], lf_hash, nlfp) {
5225 printf("nfsd unload: fnd a lock file struct\n");
5226 nfsrv_freenfslockfile(lfp);
5227 }
5228 }
5229}
5230