/*	$NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $	*/
/*	$FreeBSD: head/usr.sbin/rpc.lockd/lockd_lock.c 87096 2001-11-29 17:36:45Z alfred $ */

/*
 * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
 * Copyright (c) 2000 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#define LOCKD_DEBUG

#include <stdio.h>
#ifdef LOCKD_DEBUG
#include <stdarg.h>
#endif
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <syslog.h>
#include <errno.h>
#include <string.h>
#include <signal.h>
#include <rpc/rpc.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include <rpcsvc/sm_inter.h>
#include <rpcsvc/nlm_prot.h>
#include "lockd_lock.h"
#include "lockd.h"

#define MAXOBJECTSIZE 64
#define MAXBUFFERSIZE 1024

/*
 * SM_MAXSTRLEN is usually 1024.  This means that lock requests and
 * host name monitoring entries are *MUCH* larger than they should be.
 */

/*
 * A set of utilities for managing file locking
 *
 * XXX: All locks are in a linked list, a better structure should be used
 * to improve search/access efficiency.
 */

/* struct describing a lock */
struct file_lock {
	LIST_ENTRY(file_lock) nfslocklist;
	fhandle_t filehandle; /* NFS filehandle */
	struct sockaddr *addr;
	struct nlm4_holder client; /* lock holder */
	/* XXX: client_cookie used *only* in send_granted */
	netobj client_cookie; /* cookie sent by the client */
	char client_name[SM_MAXSTRLEN];
	int nsm_status; /* status from the remote lock manager */
	int status; /* lock status, see below */
	int flags; /* lock flags, see lockd_lock.h */
	int blocking; /* blocking lock or not */
	pid_t locker; /* pid of the child process trying to get the lock */
	int fd;	/* file descriptor for this lock */
};

LIST_HEAD(nfslocklist_head, file_lock);
struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head);

LIST_HEAD(blockedlocklist_head, file_lock);
struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head);

/* lock status */
#define LKST_LOCKED	1 /* lock is locked */
/* XXX: Is this flag file specific or lock specific? */
#define LKST_WAITING	2 /* file is already locked by another host */
#define LKST_PROCESSING	3 /* child is trying to acquire the lock */
#define LKST_DYING	4 /* must die when we get news from the child */

/* struct describing a monitored host */
struct host {
	LIST_ENTRY(host) hostlst;
	char name[SM_MAXSTRLEN];
	int refcnt;
};
/* list of hosts we monitor */
LIST_HEAD(hostlst_head, host);
struct hostlst_head hostlst_head = LIST_HEAD_INITIALIZER(hostlst_head);

/*
 * File monitoring handlers
 * XXX: These might be able to be removed when kevent support
 * is placed into the hardware lock/unlock routines.  (ie.
 * let the kernel do all the file monitoring)
 */

/* Struct describing a monitored file */
struct monfile {
	LIST_ENTRY(monfile) monfilelist;
	fhandle_t filehandle; /* Local access filehandle */
	int fd; /* file descriptor: remains open until unlock! */
	int refcount;
	int exclusive;
};

/* List of files we monitor */
LIST_HEAD(monfilelist_head, monfile);
struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head);

static int debugdelay = 0;

enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE,
		      NFS_DENIED, NFS_DENIED_NOLOCK,
		      NFS_RESERR };

enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE,
		     HW_DENIED, HW_DENIED_NOLOCK,
		     HW_STALEFH, HW_READONLY, HW_RESERR };

enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED,
			      PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR,
			      PFL_HWDENIED,  PFL_HWBLOCKED,  PFL_HWDENIED_NOLOCK, PFL_HWRESERR};

enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT};
enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT};
/* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM!  SPLIT IT APART INTO TWO */
enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8};

enum partialfilelock_status lock_partialfilelock(struct file_lock *fl);

void send_granted(struct file_lock *fl, int opcode);
void siglock(void);
void sigunlock(void);
void monitor_lock_host(const char *hostname);
void unmonitor_lock_host(const char *hostname);

void	copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
    const bool_t exclusive, struct nlm4_holder *dest);
void	deallocate_file_lock(struct file_lock *fl);
int	regions_overlap(const u_int64_t start1, const u_int64_t len1,
    const u_int64_t start2, const u_int64_t len2);
enum split_status  region_compare(const u_int64_t starte, const u_int64_t lene,
    const u_int64_t startu, const u_int64_t lenu,
    u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2);
int	same_netobj(const netobj *n0, const netobj *n1);
int	same_filelock_identity(const struct file_lock *fl0,
    const struct file_lock *fl1);

static void debuglog(char const *fmt, ...);
void dump_static_object(const unsigned char* object, const int sizeof_object,
                        unsigned char* hbuff, const int sizeof_hbuff,
                        unsigned char* cbuff, const int sizeof_cbuff);
void dump_netobj(const struct netobj *nobj);
void dump_filelock(const struct file_lock *fl);
struct file_lock *	get_lock_matching_unlock(const struct file_lock *fl);
enum nfslock_status	test_nfslock(const struct file_lock *fl,
    struct file_lock **conflicting_fl);
enum nfslock_status	lock_nfslock(struct file_lock *fl);
enum nfslock_status	delete_nfslock(struct file_lock *fl);
enum nfslock_status	unlock_nfslock(const struct file_lock *fl,
    struct file_lock **released_lock, struct file_lock **left_lock,
    struct file_lock **right_lock);
enum hwlock_status lock_hwlock(struct file_lock *fl);
enum split_status split_nfslock(const struct file_lock *exist_lock,
    const struct file_lock *unlock_lock, struct file_lock **left_lock,
    struct file_lock **right_lock);
void	add_blockingfilelock(struct file_lock *fl);
enum hwlock_status	unlock_hwlock(const struct file_lock *fl);
enum hwlock_status	test_hwlock(const struct file_lock *fl,
    struct file_lock **conflicting_fl);
void	remove_blockingfilelock(struct file_lock *fl);
void	clear_blockingfilelock(const char *hostname);
void	retry_blockingfilelocklist(void);
enum partialfilelock_status	unlock_partialfilelock(
    const struct file_lock *fl);
void	clear_partialfilelock(const char *hostname);
enum partialfilelock_status	test_partialfilelock(
    const struct file_lock *fl, struct file_lock **conflicting_fl);
enum nlm_stats	do_test(struct file_lock *fl,
    struct file_lock **conflicting_fl);
enum nlm_stats	do_unlock(struct file_lock *fl);
enum nlm_stats	do_lock(struct file_lock *fl);
void	do_clear(const char *hostname);


void
debuglog(char const *fmt, ...)
{
	va_list ap;

	if (debug_level < 1) {
		return;
	}

	sleep(debugdelay);

	va_start(ap, fmt);
	vsyslog(LOG_DEBUG, fmt, ap);
	va_end(ap);
}

void
dump_static_object(object, size_object, hbuff, size_hbuff, cbuff, size_cbuff)
	const unsigned char *object;
	const int size_object;
	unsigned char *hbuff;
	const int size_hbuff;
	unsigned char *cbuff;
	const int size_cbuff;
{
	int i, objectsize;

	if (debug_level < 2) {
		return;
	}

	objectsize = size_object;

	if (objectsize == 0) {
		debuglog("object is size 0\n");
	} else {
		if (objectsize > MAXOBJECTSIZE) {
			debuglog("Object of size %d being clamped"
			    " to size %d\n", objectsize, MAXOBJECTSIZE);
			objectsize = MAXOBJECTSIZE;
		}

		if (hbuff != NULL) {
			if (size_hbuff < objectsize*2+1) {
				debuglog("Hbuff not large enough."
				    "  Increase size\n");
			} else {
				for(i=0;i<objectsize;i++) {
					sprintf(hbuff+i*2,"%02x",*(object+i));
				}
				*(hbuff+i*2) = '\0';
			}
		}

		if (cbuff != NULL) {
			if (size_cbuff < objectsize+1) {
				debuglog("Cbuff not large enough."
				    "  Increase size\n");
			} else {
				for(i=0;i<objectsize;i++) {
					if (*(object+i) >= 32 && *(object+i) <= 127) {
						*(cbuff+i) = *(object+i);
					} else {
						*(cbuff+i) = '.';
					}
				}
				*(cbuff+i) = '\0';
			}
		}
	}
}

void
dump_netobj(const struct netobj *nobj)
{
	char hbuff[MAXBUFFERSIZE*2];
	char cbuff[MAXBUFFERSIZE];

	if (debug_level < 2) {
		return;
	}

	if (nobj == NULL) {
		debuglog("Null netobj pointer\n");
	}
	else if (nobj->n_len == 0) {
		debuglog("Size zero netobj\n");
	} else {
		dump_static_object(nobj->n_bytes, nobj->n_len,
		    hbuff, sizeof(hbuff), cbuff, sizeof(cbuff));
		debuglog("netobj: len: %d  data: %s :::  %s\n",
		    nobj->n_len, hbuff, cbuff);
	}
}

void
dump_filelock(const struct file_lock *fl)
{
	char hbuff[MAXBUFFERSIZE*2];
	char cbuff[MAXBUFFERSIZE];

	if (debug_level < 2) {
		return;
	}

	if (fl != NULL) {
		debuglog("Dumping file lock structure @ %p\n", fl);

		/*
		dump_static_object((unsigned char *)&fl->filehandle,
		    sizeof(fl->filehandle), hbuff, sizeof(hbuff),
		    cbuff, sizeof(cbuff));
		debuglog("Filehandle: %8s  :::  %8s\n", hbuff, cbuff);
		*/

		debuglog("Dumping nlm4_holder:\n"
		    "exc: %x  svid: %x  offset:len %llx:%llx\n",
		    fl->client.exclusive, fl->client.svid,
		    fl->client.l_offset, fl->client.l_len);

		/*
		debuglog("Dumping client identity:\n");
		dump_netobj(&fl->client.oh);

		debuglog("Dumping client cookie:\n");
		dump_netobj(&fl->client_cookie);

		debuglog("nsm: %d  status: %d  flags: %d  locker: %d"
		    "  fd:  %d\n", fl->nsm_status, fl->status,
		    fl->flags, fl->locker, fl->fd);
		*/
	} else {
		debuglog("NULL file lock structure\n");
	}
}

void
copy_nlm4_lock_to_nlm4_holder(src, exclusive, dest)
	const struct nlm4_lock *src;
	const bool_t exclusive;
	struct nlm4_holder *dest;
{

	dest->exclusive = exclusive;
	dest->oh.n_len = src->oh.n_len;
	dest->oh.n_bytes = src->oh.n_bytes;
	dest->svid = src->svid;
	dest->l_offset = src->l_offset;
	dest->l_len = src->l_len;
}


/*
 * allocate_file_lock: Create a lock with the given parameters
 */

struct file_lock *
allocate_file_lock(const netobj *lockowner, const netobj *matchcookie)
{
	struct file_lock *newfl;

	newfl = malloc(sizeof(struct file_lock));
	if (newfl == NULL) {
		return NULL;
	}
	bzero(newfl, sizeof(*newfl));

	newfl->client.oh.n_bytes = malloc(lockowner->n_len);
	if (newfl->client.oh.n_bytes == NULL) {
		free(newfl);
		return NULL;
	}
	newfl->client.oh.n_len = lockowner->n_len;
	bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len);

	newfl->client_cookie.n_bytes = malloc(matchcookie->n_len);
	if (newfl->client_cookie.n_bytes == NULL) {
		free(newfl->client.oh.n_bytes);
		free(newfl);
		return NULL;
	}
	newfl->client_cookie.n_len = matchcookie->n_len;
	bcopy(matchcookie->n_bytes, newfl->client_cookie.n_bytes, matchcookie->n_len);

	return newfl;
}

/*
 * fill_file_lock: Fill in the remaining fields of a file lock
 */
void
fill_file_lock(struct file_lock *fl, const fhandle_t *fh, struct sockaddr *addr,
    const bool_t exclusive, const int32_t svid, const u_int64_t offset, const u_int64_t len,
    const char* caller_name, const int state, const int status, const int flags, const int blocking)
{
	bcopy(fh, &fl->filehandle, sizeof(fhandle_t));
	fl->addr = addr;

	fl->client.exclusive = exclusive;
	fl->client.svid = svid;
	fl->client.l_offset = offset;
	fl->client.l_len = len;

	strncpy(fl->client_name, caller_name, SM_MAXSTRLEN);

	fl->nsm_status = state;
	fl->status = status;
	fl->flags = flags;
	fl->blocking = blocking;
}

/*
 * deallocate_file_lock: Free all storage associated with a file lock
 */
void
deallocate_file_lock(struct file_lock *fl)
{
	free(fl->client.oh.n_bytes);
	free(fl->client_cookie.n_bytes);
	free(fl);
}
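
/*
 * A minimal usage sketch of the allocate/fill/deallocate trio above.
 * This is illustrative only (not part of the original code); "owner",
 * "cookie", "fh", "addr" and the scalar arguments are hypothetical
 * values a caller such as getlock() would already have on hand.
 */
#if 0
	struct file_lock *fl;

	fl = allocate_file_lock(&owner, &cookie);	/* copies both netobjs */
	if (fl == NULL)
		return (nlm_denied_nolocks);
	fill_file_lock(fl, &fh, addr, TRUE, svid, offset, len,
	    caller_name, state, 0, flags, blocking);
	/* ... hand fl to do_lock() or similar ... */
	deallocate_file_lock(fl);	/* frees both netobj copies and fl */
#endif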

/*
 * regions_overlap(): This function examines the two provided regions for
 * overlap.
 */
int
regions_overlap(start1, len1, start2, len2)
	const u_int64_t start1, len1, start2, len2;
{
	u_int64_t d1,d2,d3,d4;
	enum split_status result;

	debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
		 start1, len1, start2, len2);

	result = region_compare(start1, len1, start2, len2,
	    &d1, &d2, &d3, &d4);

	debuglog("Exiting region overlap with val: %d\n",result);

	if (result == SPL_DISJOINT) {
		return 0;
	} else {
		return 1;
	}
}

/*
 * region_compare(): Examine lock regions and split appropriately
 *
 * XXX: Fix 64 bit overflow problems
 * XXX: Check to make sure I got *ALL* the cases.
 * XXX: This DESPERATELY needs a regression test.
 */
enum split_status
region_compare(starte, lene, startu, lenu,
    start1, len1, start2, len2)
	const u_int64_t starte, lene, startu, lenu;
	u_int64_t *start1, *len1, *start2, *len2;
{
	/*
	 * Please pay attention to the sequential exclusions
	 * of the if statements!!!
	 */
	enum LFLAGS lflags;
	enum RFLAGS rflags;
	enum split_status retval;

	retval = SPL_DISJOINT;

	if (lene == 0 && lenu == 0) {
		/* Examine left edge of locker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else {
			lflags = LEDGE_INSIDE;
		}

		rflags = REDGE_RBOUNDARY; /* Both are infinite */

		if (lflags == LEDGE_INSIDE) {
			*start1 = starte;
			*len1 = startu - starte;
		}

		if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) {
			retval = SPL_CONTAINED;
		} else {
			retval = SPL_LOCK1;
		}
	} else if (lene == 0 && lenu != 0) {
		/* Established lock is infinite */
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if (startu > starte) {
			lflags = LEDGE_INSIDE;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else { /* Infinity is right of finity */
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		}

		if (lflags == LEDGE_INSIDE) {
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock */
			*start2 = startu+lenu;
			*len2 = 0;
			retval |= SPL_LOCK2;
		}
	} else if (lene != 0 && lenu == 0) {
		/* Unlocker is infinite */
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
			retval = SPL_CONTAINED;
			return retval;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
			retval = SPL_CONTAINED;
			return retval;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene - 1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		rflags = REDGE_RIGHT; /* Infinity is right of finity */

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
			return retval;
		}

	} else {
		/* Both locks are finite */

		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene - 1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else if (startu + lenu < starte + lene) {
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		} else if (startu + lenu == starte + lene) {
			/* Right edge of unlocker on right edge of established lock */
			rflags = REDGE_RBOUNDARY;
		} else { /* startu + lenu > starte + lene */
			/* Right edge of unlocker is right of established lock */
			rflags = REDGE_RIGHT;
		}

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			/* Create left lock */
			*start1 = starte;
			*len1 = (startu - starte);
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock */
			*start2 = startu+lenu;
			*len2 = starte+lene-(startu+lenu);
			retval |= SPL_LOCK2;
		}

		if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) &&
		    (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) {
			retval = SPL_CONTAINED;
		}
	}

	return retval;
}
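
/*
 * A worked example of the split semantics above (a sketch; the
 * constants are illustrative only).  Unlocking the middle of an
 * established lock produces both a left and a right remainder:
 */
#if 0
	u_int64_t start1, len1, start2, len2;
	enum split_status sp;

	/* Established lock covers bytes 0..29; unlock covers bytes 10..19 */
	sp = region_compare(0, 30, 10, 10, &start1, &len1, &start2, &len2);
	/*
	 * Result: sp == (SPL_LOCK1 | SPL_LOCK2), with the left remainder
	 * start1 = 0, len1 = 10 (bytes 0..9) and the right remainder
	 * start2 = 20, len2 = 10 (bytes 20..29).
	 */
#endif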

/*
 * same_netobj: Compares the appropriate bits of a netobj for identity
 */
int
same_netobj(const netobj *n0, const netobj *n1)
{
	int retval;

	retval = 0;

	debuglog("Entering netobj identity check\n");

	if (n0->n_len == n1->n_len) {
		debuglog("Preliminary length check passed\n");
		retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len);
		debuglog("netobj %smatch\n", retval ? "" : "mis");
	}

	return (retval);
}

/*
 * same_filelock_identity: Compares the appropriate bits of a file_lock
 */
int
same_filelock_identity(fl0, fl1)
	const struct file_lock *fl0, *fl1;
{
	int retval;

	retval = 0;

	debuglog("Checking filelock identity\n");

	/*
	 * Check process ids and host information.
	 */
	retval = (fl0->client.svid == fl1->client.svid &&
	    same_netobj(&(fl0->client.oh), &(fl1->client.oh)));

	debuglog("Exiting checking filelock identity: retval: %d\n",retval);

	return (retval);
}

/*
 * Below here are routines associated with manipulating the NFS
 * lock list.
 */

/*
 * get_lock_matching_unlock: Return a lock which matches the given unlock lock
 *                           or NULL otherwise
 * XXX: It is a shame that this duplicates so much code from test_nfslock.
 */
struct file_lock *
get_lock_matching_unlock(const struct file_lock *fl)
{
	struct file_lock *ifl; /* Iterator */

	debuglog("Entering lock_matching_unlock\n");
	debuglog("********Dump of fl*****************\n");
	dump_filelock(fl);

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		debuglog("Pointer to file lock: %p\n",ifl);

		debuglog("****Dump of ifl****\n");
		dump_filelock(ifl);
		debuglog("*******************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles.  This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
			continue;

		debuglog("matching_unlock: Filehandles match, "
		    "checking regions\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
			ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("matching_unlock: Region overlap"
		    " found %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset,fl->client.l_len,
		    ifl->client.l_offset,ifl->client.l_len);

		/* Regions overlap, check the identity */
		if (!same_filelock_identity(fl,ifl))
			continue;

		debuglog("matching_unlock: Duplicate lock id.  Granting\n");
		return (ifl);
	}

	debuglog("Exiting lock_matching_unlock\n");

	return (NULL);
}

/*
 * test_nfslock: check for NFS lock in lock list
 *
 * This routine makes the following assumptions:
 *    1) Nothing will adjust the lock list during a lookup
 *
 * This routine has an interesting quirk which bit me hard.
 * The conflicting_fl is the pointer to the conflicting lock.
 * However, to modify the "*pointer* to the conflicting lock" rather
 * than the "conflicting lock itself" one must pass in a "pointer to
 * the pointer of the conflicting lock".  Gross.
 */

enum nfslock_status
test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl)
{
	struct file_lock *ifl; /* Iterator */
	enum nfslock_status retval;

	debuglog("Entering test_nfslock\n");

	retval = NFS_GRANTED;
	(*conflicting_fl) = NULL;

	debuglog("Entering lock search loop\n");

	debuglog("***********************************\n");
	debuglog("Dumping match filelock\n");
	debuglog("***********************************\n");
	dump_filelock(fl);
	debuglog("***********************************\n");

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		if (retval == NFS_DENIED)
			break;

		debuglog("Top of lock loop\n");
		debuglog("Pointer to file lock: %p\n",ifl);

		debuglog("***********************************\n");
		debuglog("Dumping test filelock\n");
		debuglog("***********************************\n");
		dump_filelock(ifl);
		debuglog("***********************************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles.  This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
			continue;

		debuglog("test_nfslock: filehandle match found\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
			ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("test_nfslock: Region overlap found"
		    " %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset,fl->client.l_len,
		    ifl->client.l_offset,ifl->client.l_len);

		/* Regions overlap, check the exclusivity */
		if (!(fl->client.exclusive || ifl->client.exclusive))
			continue;

		debuglog("test_nfslock: Exclusivity failure: %d %d\n",
		    fl->client.exclusive,
		    ifl->client.exclusive);

		if (same_filelock_identity(fl,ifl)) {
			debuglog("test_nfslock: Duplicate id.  Granting\n");
			(*conflicting_fl) = ifl;
			retval = NFS_GRANTED_DUPLICATE;
		} else {
			/* locking attempt fails */
			debuglog("test_nfslock: Lock attempt failed\n");
			debuglog("Desired lock\n");
			dump_filelock(fl);
			debuglog("Conflicting lock\n");
			dump_filelock(ifl);
			(*conflicting_fl) = ifl;
			retval = NFS_DENIED;
		}
	}

	debuglog("Dumping file locks\n");
	debuglog("Exiting test_nfslock\n");

	return (retval);
}
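
/*
 * A caller-side sketch of the double-pointer quirk noted above
 * ("conflict" is hypothetical): the callee must be able to retarget
 * the caller's pointer itself, not merely modify the lock it points
 * at, hence the struct file_lock ** parameter.
 */
#if 0
	struct file_lock *conflict = NULL;

	if (test_nfslock(fl, &conflict) == NFS_DENIED) {
		/* conflict was retargeted to the lock that blocked us */
		dump_filelock(conflict);
	}
#endif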

/*
 * lock_nfslock: attempt to create a lock in the NFS lock list
 *
 * This routine tests whether the lock will be granted and then adds
 * the entry to the lock list if so.
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 *    1) It is perfectly happy to grant a duplicate lock from the same pid.
 *       While this seems to be intuitively wrong, it is required for proper
 *       POSIX semantics during unlock.  It is absolutely imperative to not
 *       unlock the main lock before the two child locks are established. Thus,
 *       one has to be able to create duplicate locks over an existing lock
 *    2) It currently accepts duplicate locks from the same id,pid
 */

enum nfslock_status
lock_nfslock(struct file_lock *fl)
{
	enum nfslock_status retval;
	struct file_lock *dummy_fl;

	dummy_fl = NULL;

	debuglog("Entering lock_nfslock...\n");

	retval = test_nfslock(fl,&dummy_fl);

	if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) {
		debuglog("Inserting lock...\n");
		dump_filelock(fl);
		LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist);
	}

	debuglog("Exiting lock_nfslock...\n");

	return (retval);
}

/*
 * delete_nfslock: delete an NFS lock list entry
 *
 * This routine is used to delete a lock out of the NFS lock list
 * without regard to status, underlying locks, regions or anything else
 *
 * Note that this routine *does not deallocate memory* of the lock.
 * It just disconnects it from the list.  The lock can then be used
 * by other routines without fear of trashing the list.
 */

enum nfslock_status
delete_nfslock(struct file_lock *fl)
{

	LIST_REMOVE(fl, nfslocklist);

	return (NFS_GRANTED);
}

enum split_status
split_nfslock(exist_lock, unlock_lock, left_lock, right_lock)
	const struct file_lock *exist_lock, *unlock_lock;
	struct file_lock **left_lock, **right_lock;
{
	u_int64_t start1, len1, start2, len2;
	enum split_status spstatus;

	spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len,
	    unlock_lock->client.l_offset, unlock_lock->client.l_len,
	    &start1, &len1, &start2, &len2);

	if ((spstatus & SPL_LOCK1) != 0) {
		*left_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie);
		if (*left_lock == NULL) {
			debuglog("Unable to allocate resource for split 1\n");
			return SPL_RESERR;
		}

		fill_file_lock(*left_lock, &exist_lock->filehandle, exist_lock->addr,
		    exist_lock->client.exclusive, exist_lock->client.svid,
		    start1, len1,
		    exist_lock->client_name, exist_lock->nsm_status,
		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
	}

	if ((spstatus & SPL_LOCK2) != 0) {
		*right_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie);
		if (*right_lock == NULL) {
			debuglog("Unable to allocate resource for split 2\n");
			if (*left_lock != NULL) {
				deallocate_file_lock(*left_lock);
			}
			return SPL_RESERR;
		}

		fill_file_lock(*right_lock, &exist_lock->filehandle, exist_lock->addr,
		    exist_lock->client.exclusive, exist_lock->client.svid,
		    start2, len2,
		    exist_lock->client_name, exist_lock->nsm_status,
		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
	}

	return spstatus;
}

enum nfslock_status
unlock_nfslock(fl, released_lock, left_lock, right_lock)
	const struct file_lock *fl;
	struct file_lock **released_lock;
	struct file_lock **left_lock;
	struct file_lock **right_lock;
{
	struct file_lock *mfl; /* Matching file lock */
	enum nfslock_status retval;
	enum split_status spstatus;

	debuglog("Entering unlock_nfslock\n");

	*released_lock = NULL;
	*left_lock = NULL;
	*right_lock = NULL;

	retval = NFS_DENIED_NOLOCK;

	debuglog("Attempting to match lock...\n");
	mfl = get_lock_matching_unlock(fl);

	if (mfl != NULL) {
		debuglog("Unlock matched.  Querying for split\n");

		spstatus = split_nfslock(mfl, fl, left_lock, right_lock);

		debuglog("Split returned %d %p %p %p %p\n",spstatus,mfl,fl,*left_lock,*right_lock);
		debuglog("********Split dumps********\n");
		dump_filelock(mfl);
		dump_filelock(fl);
		dump_filelock(*left_lock);
		dump_filelock(*right_lock);
		debuglog("********End Split dumps********\n");

		if (spstatus == SPL_RESERR) {
			if (*left_lock != NULL) {
				deallocate_file_lock(*left_lock);
				*left_lock = NULL;
			}

			if (*right_lock != NULL) {
				deallocate_file_lock(*right_lock);
				*right_lock = NULL;
			}

			return NFS_RESERR;
		}

		/* Insert new locks from split if required */
		if (*left_lock != NULL) {
			debuglog("Split left activated\n");
			LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist);
		}

		if (*right_lock != NULL) {
			debuglog("Split right activated\n");
			LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist);
		}

		/* Unlock the lock since it matches identity */
		LIST_REMOVE(mfl, nfslocklist);
		*released_lock = mfl;
		retval = NFS_GRANTED;
	}

	debuglog("Exiting unlock_nfslock\n");

	return retval;
}
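
/*
 * A sketch of the resulting list surgery (hypothetical "ufl" is an
 * unlock request covering the middle of one established lock): the
 * matching lock is removed from the list and handed back through
 * released_lock, while the left and right remainders have already
 * been allocated and inserted in its place.
 */
#if 0
	struct file_lock *released, *left, *right;

	if (unlock_nfslock(ufl, &released, &left, &right) == NFS_GRANTED) {
		/* left and right (when non-NULL) are already in the list */
		deallocate_file_lock(released);
	}
#endif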

/*
 * Below here are the routines for manipulating the file lock directly
 * on the disk hardware itself
 */
enum hwlock_status
lock_hwlock(struct file_lock *fl)
{
	struct monfile *imf,*nmf;
	int lflags, flerror;

	/* Scan to see if filehandle already present */
	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
		if (bcmp(&fl->filehandle, &imf->filehandle,
			sizeof(fl->filehandle)) == 0) {
			/* imf is the correct filehandle */
			break;
		}
	}

	/*
	 * Filehandle already exists (we control the file)
	 * *AND* NFS has already cleared the lock for availability
	 * Grant it and bump the refcount.
	 */
	if (imf != NULL) {
		++(imf->refcount);
		return (HW_GRANTED);
	}

	/* No filehandle found, create and go */
	nmf = malloc(sizeof(struct monfile));
	if (nmf == NULL) {
		debuglog("hwlock resource allocation failure\n");
		return (HW_RESERR);
	}

	/* XXX: Is O_RDWR always the correct mode? */
	nmf->fd = fhopen(&fl->filehandle, O_RDWR);
	if (nmf->fd < 0) {
		debuglog("fhopen failed (from %16s): %32s\n",
		    fl->client_name, strerror(errno));
		free(nmf);
		switch (errno) {
		case ESTALE:
			return (HW_STALEFH);
		case EROFS:
			return (HW_READONLY);
		default:
			return (HW_RESERR);
		}
	}

	/* File opened correctly, fill the monitor struct */
	bcopy(&fl->filehandle, &nmf->filehandle, sizeof(fl->filehandle));
	nmf->refcount = 1;
	nmf->exclusive = fl->client.exclusive;

	lflags = (nmf->exclusive == 1) ?
	    (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);

	flerror = flock(nmf->fd, lflags);

	if (flerror != 0) {
		debuglog("flock failed (from %16s): %32s\n",
		    fl->client_name, strerror(errno));
		close(nmf->fd);
		free(nmf);
		switch (errno) {
		case EAGAIN:
			return (HW_DENIED);
		case ESTALE:
			return (HW_STALEFH);
		case EROFS:
			return (HW_READONLY);
		default:
			return (HW_RESERR);
		}
	}

	/* File opened and locked */
	LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);

	debuglog("flock succeeded (from %16s)\n", fl->client_name);
	return (HW_GRANTED);
}

enum hwlock_status
unlock_hwlock(const struct file_lock *fl)
{
	struct monfile *imf;

	debuglog("Entering unlock_hwlock\n");
	debuglog("Entering loop iteration\n");

	/* Scan to see if filehandle already present */
	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
		if (bcmp(&fl->filehandle, &imf->filehandle,
			sizeof(fl->filehandle)) == 0) {
			/* imf is the correct filehandle */
			break;
		}
	}

	debuglog("Completed iteration.  Proceeding\n");

	if (imf == NULL) {
		/* No lock found */
		debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
		return (HW_DENIED_NOLOCK);
	}

	/* Lock found */
	--imf->refcount;

	if (imf->refcount < 0) {
		debuglog("Negative hardware reference count\n");
	}

	if (imf->refcount <= 0) {
		close(imf->fd);
		LIST_REMOVE(imf, monfilelist);
		free(imf);
	}
	debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
	return (HW_GRANTED);
}

enum hwlock_status
test_hwlock(const struct file_lock *fl, struct file_lock **conflicting_fl)
{

	/*
	 * XXX: lock tests on hardware are not required until
	 * true partial file testing is done on the underlying file
	 */
	return (HW_RESERR);
}



/*
 * Below here are routines for manipulating blocked lock requests
 * They should only be called from the XXX_partialfilelock routines
 * if at all possible
 */

void
add_blockingfilelock(struct file_lock *fl)
{

	debuglog("Entering add_blockingfilelock\n");

	/*
	 * Clear the blocking flag so that it can be reused without
	 * adding it to the blocking queue a second time
	 */

	fl->blocking = 0;
	LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);

	debuglog("Exiting add_blockingfilelock\n");
}

void
remove_blockingfilelock(struct file_lock *fl)
{

	debuglog("Entering remove_blockingfilelock\n");

	LIST_REMOVE(fl, nfslocklist);

	debuglog("Exiting remove_blockingfilelock\n");
}

void
clear_blockingfilelock(const char *hostname)
{
	struct file_lock *ifl,*nfl;

	/*
	 * Normally, LIST_FOREACH is called for, but since
	 * the current element *is* the iterator, deleting it
	 * would mess up the iteration.  Thus, a next element
	 * must be used explicitly
	 */

	ifl = LIST_FIRST(&blockedlocklist_head);

	while (ifl != NULL) {
		nfl = LIST_NEXT(ifl, nfslocklist);

		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
			remove_blockingfilelock(ifl);
			deallocate_file_lock(ifl);
		}

		ifl = nfl;
	}
}

void
retry_blockingfilelocklist(void)
{
	/* Retry all locks in the blocked list */
	struct file_lock *ifl, *nfl; /* Iterator */
	enum partialfilelock_status pflstatus;

	debuglog("Entering retry_blockingfilelocklist\n");

	ifl = LIST_FIRST(&blockedlocklist_head);
	debuglog("Iterator choice %p\n",ifl);

	while (ifl != NULL) {
		/*
		 * SUBTLE BUG: The next element must be worked out before the
		 * current element has been moved
		 */
		nfl = LIST_NEXT(ifl, nfslocklist);
		debuglog("Iterator choice %p\n",ifl);
		debuglog("Next iterator choice %p\n",nfl);

		/*
		 * SUBTLE BUG: The file_lock must be removed from the
		 * old list so that its list pointers get disconnected
		 * before being allowed to participate in the new list
		 * which will automatically add it in if necessary.
		 */

		LIST_REMOVE(ifl, nfslocklist);
		pflstatus = lock_partialfilelock(ifl);

		if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
			debuglog("Granted blocked lock\n");
			/* lock granted and is now being used */
			send_granted(ifl,0);
		} else {
			/*
			 * Reinsert lock back into the blocked list.  If
			 * ifl was the last element, nfl is NULL and the
			 * lock has to go back in at the head instead.
			 */
			debuglog("Replacing blocked lock\n");
			if (nfl != NULL)
				LIST_INSERT_BEFORE(nfl, ifl, nfslocklist);
			else
				LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist);
		}

		/* Valid increment behavior regardless of state of ifl */
		ifl = nfl;
	}

	debuglog("Exiting retry_blockingfilelocklist\n");
}

/*
 * Below here are routines associated with manipulating all
 * aspects of the partial file locking system (list, hardware, etc.)
 */

/*
 * Please note that lock monitoring must be done at this level which
 * keeps track of *individual* lock requests on lock and unlock
 *
 * XXX: Split unlocking is going to make the unlock code miserable
 */

/*
 * lock_partialfilelock:
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 * 1) It (will) pass locks through to flock to lock the entire underlying file
 *     and then parcel out NFS locks if it gets control of the file.
 *         This matches the old rpc.lockd file semantics (except where it
 *         is now more correct).  It is the safe solution, but will cause
 *         overly restrictive blocking if someone is trying to use the
 *         underlying files without using NFS.  This appears to be an
 *         acceptable tradeoff since most people use standalone NFS servers.
 * XXX: The right solution is probably kevent combined with fcntl
 *
 *    2) Nothing modifies the lock lists between testing and granting
 *           I have no idea whether this is a useful assumption or not
 */

enum partialfilelock_status
lock_partialfilelock(struct file_lock *fl)
{
	enum partialfilelock_status retval;
	enum nfslock_status lnlstatus;
	enum hwlock_status hwstatus;

	debuglog("Entering lock_partialfilelock\n");

	retval = PFL_DENIED;

	/*
	 * Execute the NFS lock first, if possible, as it is significantly
	 * easier and less expensive to undo than the filesystem lock
	 */

	lnlstatus = lock_nfslock(fl);

	switch (lnlstatus) {
	case NFS_GRANTED:
	case NFS_GRANTED_DUPLICATE:
		/*
		 * At this point, the NFS lock is allocated and active.
		 * Remember to clean it up if the hardware lock fails
		 */
		hwstatus = lock_hwlock(fl);

		switch (hwstatus) {
		case HW_GRANTED:
		case HW_GRANTED_DUPLICATE:
			debuglog("HW GRANTED\n");
			/*
			 * XXX: Fixme: Check hwstatus for duplicate when
			 * true partial file locking and accounting is
			 * done on the hardware
			 */
			if (lnlstatus == NFS_GRANTED_DUPLICATE) {
				retval = PFL_GRANTED_DUPLICATE;
			} else {
				retval = PFL_GRANTED;
			}
			monitor_lock_host(fl->client_name);
			break;
		case HW_RESERR:
			debuglog("HW RESERR\n");
			retval = PFL_HWRESERR;
			break;
		case HW_DENIED:
			debuglog("HW DENIED\n");
			retval = PFL_HWDENIED;
			break;
		default:
			debuglog("Unmatched hwstatus %d\n",hwstatus);
			break;
		}

		if (retval != PFL_GRANTED &&
		    retval != PFL_GRANTED_DUPLICATE) {
			/* Clean up the NFS lock */
			debuglog("Deleting trial NFS lock\n");
			delete_nfslock(fl);
		}
		break;
	case NFS_DENIED:
		retval = PFL_NFSDENIED;
		break;
	case NFS_RESERR:
		retval = PFL_NFSRESERR;
		break;
	default:
		debuglog("Unmatched lnlstatus %d\n", lnlstatus);
		retval = PFL_NFSDENIED_NOLOCK;
		break;
	}

	/*
	 * By the time fl reaches here, it is completely free again on
	 * failure.  The NFS lock done before attempting the
	 * hardware lock has been backed out
	 */

	if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) {
		/* One last chance to check the lock */
		if (fl->blocking == 1) {
			/* Queue the lock */
			debuglog("BLOCKING LOCK RECEIVED\n");
			retval = (retval == PFL_NFSDENIED ?
			    PFL_NFSBLOCKED : PFL_HWBLOCKED);
			add_blockingfilelock(fl);
			dump_filelock(fl);
		} else {
			/* Leave retval alone, it's already correct */
			debuglog("Lock denied.  Non-blocking failure\n");
			dump_filelock(fl);
		}
	}

	debuglog("Exiting lock_partialfilelock\n");

	return retval;
}

/*
 * unlock_partialfilelock:
 *
 * Given a file_lock, unlock all locks which match.
 *
 * Note that a given lock might have to unlock ITSELF!  See
 * clear_partialfilelock for example.
 */

enum partialfilelock_status
unlock_partialfilelock(const struct file_lock *fl)
{
	struct file_lock *lfl,*rfl,*releasedfl,*selffl;
	enum partialfilelock_status retval;
	enum nfslock_status unlstatus;
	enum hwlock_status unlhwstatus, lhwstatus;

	debuglog("Entering unlock_partialfilelock\n");

	selffl = NULL;
	lfl = NULL;
	rfl = NULL;
	releasedfl = NULL;
	retval = PFL_DENIED;

	/*
	 * There are significant overlap and atomicity issues
	 * with partially releasing a lock.  For example, releasing
	 * part of an NFS shared lock does *not* always release the
	 * corresponding part of the file since there is only one
	 * rpc.lockd UID but multiple users could be requesting it
	 * from NFS.  Also, an unlock request should never allow
	 * another process to gain a lock on the remaining parts.
	 * ie. Always apply the new locks before releasing the
	 * old one
	 */

	/*
	 * The loop is required since multiple little locks
	 * can be allocated and then deallocated with one
	 * big unlock.
	 *
	 * The loop is required to be here so that the nfs &
	 * hw subsystems do not need to communicate with
	 * one another
	 */

	do {
		debuglog("Value of releasedfl: %p\n",releasedfl);
		/* lfl&rfl are created *AND* placed into the NFS lock list if required */
		unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl);
		debuglog("Value of releasedfl: %p\n",releasedfl);


		/* XXX: This is grungy.  It should be refactored to be cleaner */
		if (lfl != NULL) {
			lhwstatus = lock_hwlock(lfl);
			if (lhwstatus != HW_GRANTED &&
			    lhwstatus != HW_GRANTED_DUPLICATE) {
				debuglog("HW duplicate lock failure for left split\n");
			}
			monitor_lock_host(lfl->client_name);
		}

		if (rfl != NULL) {
			lhwstatus = lock_hwlock(rfl);
			if (lhwstatus != HW_GRANTED &&
			    lhwstatus != HW_GRANTED_DUPLICATE) {
				debuglog("HW duplicate lock failure for right split\n");
			}
			monitor_lock_host(rfl->client_name);
		}

		switch (unlstatus) {
		case NFS_GRANTED:
			/* Attempt to unlock on the hardware */
			debuglog("NFS unlock granted.  Attempting hardware unlock\n");

			/* This call *MUST NOT* unlock the two newly allocated locks */
			unlhwstatus = unlock_hwlock(fl);
			debuglog("HW unlock returned with code %d\n",unlhwstatus);

			switch (unlhwstatus) {
			case HW_GRANTED:
				debuglog("HW unlock granted\n");
				unmonitor_lock_host(releasedfl->client_name);
				retval = PFL_GRANTED;
				break;
			case HW_DENIED_NOLOCK:
				/* Huh?!?!  This shouldn't happen */
				debuglog("HW unlock denied no lock\n");
				retval = PFL_HWRESERR;
				/* Break out of do-while */
				unlstatus = NFS_RESERR;
				break;
			default:
				debuglog("HW unlock failed\n");
				retval = PFL_HWRESERR;
				/* Break out of do-while */
				unlstatus = NFS_RESERR;
				break;
			}

			debuglog("Exiting with status retval: %d\n",retval);

			retry_blockingfilelocklist();
			break;
		case NFS_DENIED_NOLOCK:
			retval = PFL_GRANTED;
			debuglog("All locks cleaned out\n");
			break;
		default:
			retval = PFL_NFSRESERR;
			debuglog("NFS unlock failure\n");
			dump_filelock(fl);
			break;
		}

		if (releasedfl != NULL) {
			if (fl == releasedfl) {
				/*
				 * XXX: YECHHH!!! Attempt to unlock self succeeded
				 * but we can't deallocate the space yet.  This is what
				 * happens when you don't write malloc and free together
				 */
				debuglog("Attempt to unlock self\n");
				selffl = releasedfl;
			} else {
				/*
				 * XXX: this deallocation *still* needs to migrate closer
				 * to the allocation code way up in get_lock or the allocation
				 * code needs to migrate down (violation of "When you write
				 * malloc you must write free")
				 */

				deallocate_file_lock(releasedfl);
			}
		}

	} while (unlstatus == NFS_GRANTED);

	if (selffl != NULL) {
		/*
		 * This statement wipes out the incoming file lock (fl)
		 * in spite of the fact that it is declared const
		 */
		debuglog("WARNING!  Destroying incoming lock pointer\n");
		deallocate_file_lock(selffl);
	}

	debuglog("Exiting unlock_partialfilelock\n");

	return retval;
}

/*
 * clear_partialfilelock
 *
 * Normally called in response to statd state number change.
 * Wipe out all locks held by a host.  As a bonus, the act of
 * doing so should automatically clear their statd entries and
 * unmonitor the host.
 */

void
clear_partialfilelock(const char *hostname)
{
	struct file_lock *ifl, *nfl;

	/* Clear blocking file lock list */
	clear_blockingfilelock(hostname);

	/* do all required unlocks */
	/* Note that unlock can smash the current pointer to a lock */

	/*
	 * Normally, LIST_FOREACH is called for, but since
	 * the current element *is* the iterator, deleting it
	 * would mess up the iteration.  Thus, a next element
	 * must be used explicitly
	 */

	ifl = LIST_FIRST(&nfslocklist_head);

	while (ifl != NULL) {
		nfl = LIST_NEXT(ifl, nfslocklist);

		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
			/* Unlock destroys ifl out from underneath */
			unlock_partialfilelock(ifl);
			/* ifl is NO LONGER VALID AT THIS POINT */
		}
		ifl = nfl;
	}
}

/*
 * test_partialfilelock:
 */
enum partialfilelock_status
test_partialfilelock(const struct file_lock *fl,
    struct file_lock **conflicting_fl)
{
	enum partialfilelock_status retval;
	enum nfslock_status teststatus;

	debuglog("Entering testpartialfilelock...\n");

	retval = PFL_DENIED;

	teststatus = test_nfslock(fl, conflicting_fl);
	debuglog("test_partialfilelock: teststatus %d\n",teststatus);

	if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) {
		/* XXX: Add the underlying filesystem locking code */
		retval = (teststatus == NFS_GRANTED) ?
		    PFL_GRANTED : PFL_GRANTED_DUPLICATE;
		debuglog("Dumping locks...\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		debuglog("Done dumping locks...\n");
	} else {
		retval = PFL_NFSDENIED;
		debuglog("NFS test denied.\n");
		dump_filelock(fl);
		debuglog("Conflicting.\n");
		dump_filelock(*conflicting_fl);
	}

	debuglog("Exiting testpartialfilelock...\n");

	return retval;
}

/*
 * Below here are routines associated with translating the partial file locking
 * codes into useful codes to send back to the NFS RPC messaging system
 */

/*
 * These routines translate the (relatively) useful return codes back onto
 * the few return codes which the nlm subsystem wishes to transmit
 */

enum nlm_stats
do_test(struct file_lock *fl, struct file_lock **conflicting_fl)
{
	enum partialfilelock_status pfsret;
	enum nlm_stats retval;

	debuglog("Entering do_test...\n");

	pfsret = test_partialfilelock(fl,conflicting_fl);

	switch (pfsret) {
	case PFL_GRANTED:
		debuglog("PFL test lock granted\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_GRANTED_DUPLICATE:
		debuglog("PFL test lock granted--duplicate id detected\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		debuglog("Clearing conflicting_fl for call semantics\n");
		*conflicting_fl = NULL;
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_NFSDENIED:
	case PFL_HWDENIED:
		debuglog("PFL test lock denied\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
		break;
	case PFL_NFSRESERR:
	case PFL_HWRESERR:
		debuglog("PFL test lock resource fail\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
		break;
	default:
		debuglog("PFL test lock *FAILED*\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
		break;
	}

	debuglog("Exiting do_test...\n");

	return retval;
}

/*
 * do_lock: Try to acquire a lock
 *
 * This routine makes a distinction between NLM versions.  I am pretty
 * convinced that this should be abstracted out and bounced up a level
 */

enum nlm_stats
do_lock(struct file_lock *fl)
{
	enum partialfilelock_status pfsret;
	enum nlm_stats retval;

	debuglog("Entering do_lock...\n");

	pfsret = lock_partialfilelock(fl);

	switch (pfsret) {
	case PFL_GRANTED:
		debuglog("PFL lock granted\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_GRANTED_DUPLICATE:
		debuglog("PFL lock granted--duplicate id detected\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_NFSDENIED:
	case PFL_HWDENIED:
		debuglog("PFL_NFS lock denied\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
		break;
	case PFL_NFSBLOCKED:
	case PFL_HWBLOCKED:
		debuglog("PFL_NFS blocking lock denied.  Queued.\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked;
		break;
	case PFL_NFSRESERR:
	case PFL_HWRESERR:
		debuglog("PFL lock resource allocation fail\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
		break;
	default:
		debuglog("PFL lock *FAILED*\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
		break;
	}

	debuglog("Exiting do_lock...\n");

	return retval;
}

enum nlm_stats
do_unlock(struct file_lock *fl)
{
	enum partialfilelock_status pfsret;
	enum nlm_stats retval;

	debuglog("Entering do_unlock...\n");
	pfsret = unlock_partialfilelock(fl);

	switch (pfsret) {
	case PFL_GRANTED:
		debuglog("PFL unlock granted\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_NFSDENIED:
	case PFL_HWDENIED:
		debuglog("PFL_NFS unlock denied\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
		break;
	case PFL_NFSDENIED_NOLOCK:
	case PFL_HWDENIED_NOLOCK:
		debuglog("PFL_NFS no lock found\n");
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_NFSRESERR:
	case PFL_HWRESERR:
		debuglog("PFL unlock resource failure\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
		break;
	default:
		debuglog("PFL unlock *FAILED*\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
		break;
	}

	debuglog("Exiting do_unlock...\n");

	return retval;
}

/*
 * do_clear
 *
 * This routine is a trivial pass-through because clearing locks
 * doesn't produce a return code.  It is here for completeness in case
 * someone *does* need to do return codes later.  A decent compiler
 * should optimize this away.
 */

void
do_clear(const char *hostname)
{

	clear_partialfilelock(hostname);
}

/*
 * The following routines are all called from the code which the
 * RPC layer invokes
 */

/*
 * testlock(): inform the caller if the requested lock would be granted
 *
 * returns NULL if lock would be granted
 * returns pointer to a conflicting nlm4_holder if not
 */

struct nlm4_holder *
testlock(struct nlm4_lock *lock, bool_t exclusive, int flags)
{
	struct file_lock test_fl, *conflicting_fl;

	bzero(&test_fl, sizeof(test_fl));

	bcopy(lock->fh.n_bytes, &(test_fl.filehandle), sizeof(fhandle_t));
	copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client);

	siglock();
	do_test(&test_fl, &conflicting_fl);

	if (conflicting_fl == NULL) {
		debuglog("No conflicting lock found\n");
		sigunlock();
		return NULL;
	} else {
		debuglog("Found conflicting lock\n");
		dump_filelock(conflicting_fl);
		sigunlock();
		return (&conflicting_fl->client);
	}
}
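
/*
 * A hypothetical RPC-layer caller sketch ("args" and "result" are
 * illustrative, not the actual nlm_prot procedure code): testlock()
 * returns NULL when the lock would be granted, or the conflicting
 * holder to copy into the reply otherwise.
 */
#if 0
	struct nlm4_holder *holder;

	holder = testlock(&args->alock, args->exclusive, LOCK_V4);
	if (holder == NULL)
		result->stat.stat = nlm4_granted;
	else
		result->stat.stat = nlm4_denied;	/* and copy *holder into the reply */
#endif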
1846
1847/*
1848 * getlock: try to aquire the lock.
1849 * If file is already locked and we can sleep, put the lock in the list with
1850 * status LKST_WAITING; it'll be processed later.
1851 * Otherwise try to lock. If we're allowed to block, fork a child which
1852 * will do the blocking lock.
1853 */
1854
1855enum nlm_stats
1856getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags)
1857{
1858	struct file_lock *newfl;
1859	enum nlm_stats retval;
1860
1861	debuglog("Entering getlock...\n");
1862
1863	if (grace_expired == 0 && lckarg->reclaim == 0)
1864		return (flags & LOCK_V4) ?
1865		    nlm4_denied_grace_period : nlm_denied_grace_period;
1866
1867	/* allocate new file_lock for this request */
1868	newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->cookie);
1869	if (newfl == NULL) {
1870		syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno));
1871		/* failed */
1872		return (flags & LOCK_V4) ?
1873		    nlm4_denied_nolocks : nlm_denied_nolocks;
1874	}
1875
1876	if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) {
1877		debuglog("received fhandle size %d, local size %d",
1878		    lckarg->alock.fh.n_len, (int)sizeof(fhandle_t));
1879	}
1880
1881	fill_file_lock(newfl, (fhandle_t *)lckarg->alock.fh.n_bytes,
1882	    (struct sockaddr *)svc_getrpccaller(rqstp->rq_xprt)->buf,
1883	    lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset, lckarg->alock.l_len,
1884	    lckarg->alock.caller_name, lckarg->state, 0, flags, lckarg->block);
1885
1886	/*
1887	 * newfl is now fully constructed and deallocate_file_lock
1888	 * can now be used to delete it
1889	 */
1890
1891	siglock();
1892	debuglog("Pointer to new lock is %p\n",newfl);
1893
1894	retval = do_lock(newfl);
1895
1896	debuglog("Pointer to new lock is %p\n",newfl);
1897	sigunlock();
1898
1899	switch (retval) {
1900	case nlm4_granted:
1901		/* case nlm_granted: is the same as nlm4_granted */
1902		/* do_mon(lckarg->alock.caller_name); */
1903		break;
1904	case nlm4_blocked:
1905		/* case nlm_blocked: is the same as nlm4_blocked */
1906		/* do_mon(lckarg->alock.caller_name); */
1907		break;
1908	default:
1909		deallocate_file_lock(newfl);
1910		break;
1911	}
1913
1914	debuglog("Exiting getlock...\n");
1915
1916	return retval;
1917}
1918
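/*
 * Ownership note (sketch of the convention visible above): on
 * nlm4_granted or nlm4_blocked the new file_lock has been handed off
 * to the lock lists; on any other status getlock frees it again:
 *
 *	retval = do_lock(newfl);
 *	if (retval != nlm4_granted && retval != nlm4_blocked)
 *		deallocate_file_lock(newfl);
 */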
1919
1920/* unlock a filehandle; builds a temporary file_lock for do_unlock() */
1921enum nlm_stats
1922unlock(nlm4_lock *lock, const int flags)
1923{
1924	struct file_lock fl;
1925	enum nlm_stats err;
1926
1927	siglock();
1928
1929	debuglog("Entering unlock...\n");
1930
1931	bzero(&fl,sizeof(struct file_lock));
1932	bcopy(lock->fh.n_bytes, &fl.filehandle, sizeof(fhandle_t));
1933
1934	copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client);
1935
1936	err = do_unlock(&fl);
1937
1938	sigunlock();
1939
1940	debuglog("Exiting unlock...\n");
1941
1942	return err;
1943}
1944
1945/*
1946 * XXX: The following monitor/unmonitor routines
1947 * have not been extensively tested (i.e. no regression script
1948 * exists like the one for the locking sections)
1949 */
1950
1951/*
1952 * monitor_lock_host: monitor lock hosts locally with a ref count and
1953 * inform statd
1954 */
1955void
1956monitor_lock_host(const char *hostname)
1957{
1958	struct host *ihp, *nhp;
1959	struct mon smon;
1960	struct sm_stat_res sres;
1961	int rpcret, statflag;
1962
1963	rpcret = 0;
1964	statflag = 0;
1965
1966	LIST_FOREACH(ihp, &hostlst_head, hostlst) {
1967		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
1968			/* Host is already monitored, bump refcount */
1969			++ihp->refcnt;
1970			/* Host should only be in the monitor list once */
1971			return;
1972		}
1973	}
1974
1975	/* Host is not yet monitored, add it */
1976	nhp = malloc(sizeof(struct host));
1977
1978	if (nhp == NULL) {
1979		debuglog("Unable to allocate entry for statd mon\n");
1980		return;
1981	}
1982
1983	/* Allocated new host entry, now fill the fields */
1984	strlcpy(nhp->name, hostname, SM_MAXSTRLEN); /* NUL-safe copy */
1985	nhp->refcnt = 1;
1986	debuglog("Locally monitoring host %16s\n", hostname);
1987
1988	debuglog("Attempting to tell statd\n");
1989
1990	bzero(&smon,sizeof(smon));
1991
1992	smon.mon_id.mon_name = nhp->name;
1993	smon.mon_id.my_id.my_name = "localhost";
1994
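	/*
	 * The my_id triple names the RPC that statd will call back
	 * into when the monitored host's state changes.
	 */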
1995	smon.mon_id.my_id.my_prog = NLM_PROG;
1996	smon.mon_id.my_id.my_vers = NLM_SM;
1997	smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;
1998
1999	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON, xdr_mon,
2000	    &smon, xdr_sm_stat_res, &sres);
2001
2002	if (rpcret == 0) {
2003		if (sres.res_stat == stat_fail) {
2004			debuglog("Statd call failed\n");
2005			statflag = 0;
2006		} else {
2007			statflag = 1;
2008		}
2009	} else {
2010		debuglog("Rpc call to statd failed with return value: %d\n",
2011		    rpcret);
2012		statflag = 0;
2013	}
2014
2015	if (statflag == 1) {
2016		LIST_INSERT_HEAD(&hostlst_head, nhp, hostlst);
2017	} else {
2018		free(nhp);
2019	}
2020
2021}
2022
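/*
 * Sketch of the intended pairing (assumed from the refcounting, not
 * enforced in this file): monitor when a host's first lock is granted
 * and unmonitor when its last lock goes away, so the statd entry
 * lives exactly as long as the host holds locks:
 *
 *	monitor_lock_host(fl->client_name);	refcnt 0 -> 1, SM_MON
 *	...
 *	unmonitor_lock_host(fl->client_name);	refcnt 1 -> 0, SM_UNMON
 */
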
2023/*
2024 * unmonitor_lock_host: clear monitor ref counts and inform statd when gone
2025 */
2026void
2027unmonitor_lock_host(const char *hostname)
2028{
2029	struct host *ihp;
2030	struct mon_id smon_id;
2031	struct sm_stat smstat;
2032	int rpcret;
2033
2034	rpcret = 0;
2035
2036	LIST_FOREACH(ihp, &hostlst_head, hostlst) {
2038		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2039			/* Host is monitored, decrement refcount */
2040			--ihp->refcnt;
2041			/* Host should only be in the monitor list once */
2042			break;
2043		}
2044	}
2045
2046	if (ihp == NULL) {
2047		debuglog("Could not find host %16s in mon list\n", hostname);
2048		return;
2049	}
2050
2051	if (ihp->refcnt > 0)
2052		return;
2053
2054	if (ihp->refcnt < 0) {
2055		debuglog("Negative refcount!: %d\n",
2056		    ihp->refcnt);
2057	}
2058
2059	debuglog("Attempting to unmonitor host %16s\n", hostname);
2060
2061	bzero(&smon_id,sizeof(smon_id));
2062
2063	smon_id.mon_name = (char *)hostname;
2064	smon_id.my_id.my_name = "localhost";
2065	smon_id.my_id.my_prog = NLM_PROG;
2066	smon_id.my_id.my_vers = NLM_SM;
2067	smon_id.my_id.my_proc = NLM_SM_NOTIFY;
2068
2069	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON, xdr_mon_id,
2070	    &smon_id, xdr_sm_stat, &smstat);
2071
2072	if (rpcret != 0) {
2073		debuglog("Rpc call to unmonitor statd failed with "
2074		    "return value: %d\n", rpcret);
2075	}
2076
2077	LIST_REMOVE(ihp, hostlst);
2078	free(ihp);
2079}
2080
2081/*
2082 * notify: Clear all locks from a host if statd complains
2083 *
2084 * XXX: This routine has not been thoroughly tested.  However, neither
2085 * had the old one been.  It used to compare the statd crash state counter
2086 * to the current lock state.  The upshot of this was that it basically
2087 * cleared all locks from the specified host 99% of the time (with the
2088 * other 1% being a bug).  Consequently, the assumption is that clearing
2089 * all locks from a host when notified by statd is acceptable.
2090 *
2091 * Please note that this routine skips the usual level of redirection
2092 * through a do_* type routine.  This introduces a possible source of
2093 * error; it might be better to move the work into a do_notify routine
2094 * and make this a thin wrapper around it.
2095 */
2096
2097void
2098notify(const char *hostname, const int state)
2099{
2100	debuglog("notify from %s, new state %d", hostname, state);
2101
2102	siglock();
2103	do_clear(hostname);
2104	sigunlock();
2105
2106	debuglog("Leaving notify\n");
2107}
2108
2109void
2110send_granted(struct file_lock *fl, int opcode)
2111{
2114	CLIENT *cli;
2115	static char dummy;
2116	struct timeval timeo;
2117	int success;
2118	static struct nlm_res retval;
2119	static struct nlm4_res retval4;
2120
2121	debuglog("About to send granted on blocked lock\n");
2122	sleep(1);
2123	debuglog("Blowing off return send\n");
2124
2125	cli = get_client(fl->addr,
2126	    (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS);
2127	if (cli == NULL) {
2128		syslog(LOG_NOTICE, "failed to get CLIENT for %s",
2129		    fl->client_name);
2130		/*
2131		 * We failed to notify the remote host that the lock has been
2132		 * granted.  The client will time out, retry, and the lock
2133		 * will be granted on the retry.
2134		 */
2135		return;
2136	}
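	/*
	 * Async (MSG) replies are not waited for, so they get a zero
	 * timeout; synchronous replies wait half a second for a result.
	 */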
2137	timeo.tv_sec = 0;
2138	timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */
2139
2140	if (fl->flags & LOCK_V4) {
2141		static nlm4_testargs res;
2142		res.cookie = fl->client_cookie;
2143		res.exclusive = fl->client.exclusive;
2144		res.alock.caller_name = fl->client_name;
2145		res.alock.fh.n_len = sizeof(fhandle_t);
2146		res.alock.fh.n_bytes = (char*)&fl->filehandle;
2147		res.alock.oh = fl->client.oh;
2148		res.alock.svid = fl->client.svid;
2149		res.alock.l_offset = fl->client.l_offset;
2150		res.alock.l_len = fl->client.l_len;
2151		debuglog("sending v4 reply%s",
2152			 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2153		if (fl->flags & LOCK_ASYNC) {
2154			success = clnt_call(cli, NLM4_GRANTED_MSG,
2155			    xdr_nlm4_testargs, &res, xdr_void, &dummy, timeo);
2156		} else {
2157			success = clnt_call(cli, NLM4_GRANTED,
2158			    xdr_nlm4_testargs, &res, xdr_nlm4_res,
2159			    &retval4, timeo);
2160		}
2161	} else {
2162		static nlm_testargs res;
2163
2164		res.cookie = fl->client_cookie;
2165		res.exclusive = fl->client.exclusive;
2166		res.alock.caller_name = fl->client_name;
2167		res.alock.fh.n_len = sizeof(fhandle_t);
2168		res.alock.fh.n_bytes = (char*)&fl->filehandle;
2169		res.alock.oh = fl->client.oh;
2170		res.alock.svid = fl->client.svid;
2171		res.alock.l_offset = fl->client.l_offset;
2172		res.alock.l_len = fl->client.l_len;
2173		debuglog("sending v1 reply%s",
2174			 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2175		if (fl->flags & LOCK_ASYNC) {
2176			success = clnt_call(cli, NLM_GRANTED_MSG,
2177			    xdr_nlm_testargs, &res, xdr_void, &dummy, timeo);
2178		} else {
2179			success = clnt_call(cli, NLM_GRANTED,
2180			    xdr_nlm_testargs, &res, xdr_nlm_res,
2181			    &retval, timeo);
2182		}
2183	}
2184	if (debug_level > 2)
2185		debuglog("clnt_call returns %d(%s) for granted",
2186			 success, clnt_sperrno(success));
2187
2188}
2189
2190/*
2191 * Routines below here have not been modified in the overhaul
2192 */
2193
2194/*
2195 * Are these two routines still required since lockd is not spawning off
2196 * children to service locks anymore?  Presumably they were originally
2197 * put in place to prevent one child from changing the lock list out
2198 * from under another one.
2199 */
2200
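/*
 * Usage pattern (as in getlock/unlock/testlock above): bracket every
 * traversal or mutation of the lock lists, for example:
 *
 *	siglock();
 *	retval = do_lock(newfl);
 *	sigunlock();
 */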
2201void
2202siglock(void)
2203{
2204	sigset_t block;
2205
2206	sigemptyset(&block);
2207	sigaddset(&block, SIGCHLD);
2208
2209	if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) {
2210		syslog(LOG_WARNING, "siglock failed: %s", strerror(errno));
2211	}
2212}
2213
2214void
2215sigunlock(void)
2216{
2217	sigset_t block;
2218
2219	sigemptyset(&block);
2220	sigaddset(&block, SIGCHLD);
2221
2222	if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) {
2223		syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno));
2224	}
2225}
2226