lockd_lock.c revision 161231
1/*	$NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $	*/
2
3/*
4 * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
5 * Copyright (c) 2000 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *	This product includes software developed by the University of
18 *	California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 *    may be used to endorse or promote products derived from this software
21 *    without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: head/usr.sbin/rpc.lockd/lockd_lock.c 161231 2006-08-11 23:03:16Z thomas $");
39
40#define LOCKD_DEBUG
41
42#include <stdio.h>
43#ifdef LOCKD_DEBUG
44#include <stdarg.h>
45#endif
46#include <stdlib.h>
47#include <unistd.h>
48#include <fcntl.h>
49#include <syslog.h>
50#include <errno.h>
51#include <string.h>
52#include <signal.h>
53#include <rpc/rpc.h>
54#include <sys/types.h>
55#include <sys/stat.h>
56#include <sys/socket.h>
57#include <sys/param.h>
58#include <sys/mount.h>
59#include <sys/wait.h>
60#include <rpcsvc/sm_inter.h>
61#include <rpcsvc/nlm_prot.h>
62#include "lockd_lock.h"
63#include "lockd.h"
64
65#define MAXOBJECTSIZE 64
66#define MAXBUFFERSIZE 1024
67
68/*
69 * A set of utilities for managing file locking
70 *
71 * XXX: All locks are in a linked list, a better structure should be used
72 * to improve search/access effeciency.
73 */
74
75/* struct describing a lock */
76struct file_lock {
77	LIST_ENTRY(file_lock) nfslocklist;
78	fhandle_t filehandle; /* NFS filehandle */
79	struct sockaddr *addr;
80	struct nlm4_holder client; /* lock holder */
81	/* XXX: client_cookie used *only* in send_granted */
82	netobj client_cookie; /* cookie sent by the client */
83	int nsm_status; /* status from the remote lock manager */
84	int status; /* lock status, see below */
85	int flags; /* lock flags, see lockd_lock.h */
86	int blocking; /* blocking lock or not */
87	char client_name[SM_MAXSTRLEN];	/* client_name is really variable
88					   length and must be last! */
89};
90
91LIST_HEAD(nfslocklist_head, file_lock);
92struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head);
93
94LIST_HEAD(blockedlocklist_head, file_lock);
95struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head);
96
97/* lock status */
98#define LKST_LOCKED	1 /* lock is locked */
99/* XXX: Is this flag file specific or lock specific? */
100#define LKST_WAITING	2 /* file is already locked by another host */
101#define LKST_PROCESSING	3 /* child is trying to aquire the lock */
102#define LKST_DYING	4 /* must dies when we get news from the child */
103
104/* struct describing a monitored host */
105struct host {
106	LIST_ENTRY(host) hostlst;
107	int refcnt;
108	char name[SM_MAXSTRLEN]; /* name is really variable length and
109                                    must be last! */
110};
111/* list of hosts we monitor */
112LIST_HEAD(hostlst_head, host);
113struct hostlst_head hostlst_head = LIST_HEAD_INITIALIZER(hostlst_head);
114
115/*
116 * File monitoring handlers
117 * XXX: These might be able to be removed when kevent support
118 * is placed into the hardware lock/unlock routines.  (ie.
119 * let the kernel do all the file monitoring)
120 */
121
122/* Struct describing a monitored file */
123struct monfile {
124	LIST_ENTRY(monfile) monfilelist;
125	fhandle_t filehandle; /* Local access filehandle */
126	int fd; /* file descriptor: remains open until unlock! */
127	int refcount;
128	int exclusive;
129};
130
131/* List of files we monitor */
132LIST_HEAD(monfilelist_head, monfile);
133struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head);
134
135static int debugdelay = 0;
136
137enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE,
138		      NFS_DENIED, NFS_DENIED_NOLOCK,
139		      NFS_RESERR };
140
141enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE,
142		     HW_DENIED, HW_DENIED_NOLOCK,
143		     HW_STALEFH, HW_READONLY, HW_RESERR };
144
145enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED,
146			      PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR,
147			      PFL_HWDENIED,  PFL_HWBLOCKED,  PFL_HWDENIED_NOLOCK, PFL_HWRESERR};
148
149enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT};
150enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT};
151/* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM!  SPLIT IT APART INTO TWO */
152enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8};
153
154enum partialfilelock_status lock_partialfilelock(struct file_lock *fl);
155
156void send_granted(struct file_lock *fl, int opcode);
157void siglock(void);
158void sigunlock(void);
159void monitor_lock_host(const char *hostname);
160void unmonitor_lock_host(char *hostname);
161
162void	copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
163    const bool_t exclusive, struct nlm4_holder *dest);
164struct file_lock *	allocate_file_lock(const netobj *lockowner,
165					   const netobj *matchcookie,
166					   const struct sockaddr *addr,
167					   const char *caller_name);
168void	deallocate_file_lock(struct file_lock *fl);
169void	fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
170		       const bool_t exclusive, const int32_t svid,
171    const u_int64_t offset, const u_int64_t len,
172    const int state, const int status, const int flags, const int blocking);
173int	regions_overlap(const u_int64_t start1, const u_int64_t len1,
174    const u_int64_t start2, const u_int64_t len2);
175enum split_status  region_compare(const u_int64_t starte, const u_int64_t lene,
176    const u_int64_t startu, const u_int64_t lenu,
177    u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2);
178int	same_netobj(const netobj *n0, const netobj *n1);
179int	same_filelock_identity(const struct file_lock *fl0,
180    const struct file_lock *fl2);
181
182static void debuglog(char const *fmt, ...);
183void dump_static_object(const unsigned char* object, const int sizeof_object,
184                        unsigned char* hbuff, const int sizeof_hbuff,
185                        unsigned char* cbuff, const int sizeof_cbuff);
186void dump_netobj(const struct netobj *nobj);
187void dump_filelock(const struct file_lock *fl);
188struct file_lock *	get_lock_matching_unlock(const struct file_lock *fl);
189enum nfslock_status	test_nfslock(const struct file_lock *fl,
190    struct file_lock **conflicting_fl);
191enum nfslock_status	lock_nfslock(struct file_lock *fl);
192enum nfslock_status	delete_nfslock(struct file_lock *fl);
193enum nfslock_status	unlock_nfslock(const struct file_lock *fl,
194    struct file_lock **released_lock, struct file_lock **left_lock,
195    struct file_lock **right_lock);
196enum hwlock_status lock_hwlock(struct file_lock *fl);
197enum split_status split_nfslock(const struct file_lock *exist_lock,
198    const struct file_lock *unlock_lock, struct file_lock **left_lock,
199    struct file_lock **right_lock);
200void	add_blockingfilelock(struct file_lock *fl);
201enum hwlock_status	unlock_hwlock(const struct file_lock *fl);
202enum hwlock_status	test_hwlock(const struct file_lock *fl,
203    struct file_lock **conflicting_fl);
204void	remove_blockingfilelock(struct file_lock *fl);
205void	clear_blockingfilelock(const char *hostname);
206void	retry_blockingfilelocklist(void);
207enum partialfilelock_status	unlock_partialfilelock(
208    const struct file_lock *fl);
209void	clear_partialfilelock(const char *hostname);
210enum partialfilelock_status	test_partialfilelock(
211    const struct file_lock *fl, struct file_lock **conflicting_fl);
212enum nlm_stats	do_test(struct file_lock *fl,
213    struct file_lock **conflicting_fl);
214enum nlm_stats	do_unlock(struct file_lock *fl);
215enum nlm_stats	do_lock(struct file_lock *fl);
216void	do_clear(const char *hostname);
217size_t	strnlen(const char *, size_t);
218
219void
220debuglog(char const *fmt, ...)
221{
222	va_list ap;
223
224	if (debug_level < 1) {
225		return;
226	}
227
228	sleep(debugdelay);
229
230	va_start(ap, fmt);
231	vsyslog(LOG_DEBUG, fmt, ap);
232	va_end(ap);
233}
234
235void
236dump_static_object(object, size_object, hbuff, size_hbuff, cbuff, size_cbuff)
237	const unsigned char *object;
238	const int size_object;
239	unsigned char *hbuff;
240	const int size_hbuff;
241	unsigned char *cbuff;
242	const int size_cbuff;
243{
244	int i, objectsize;
245
246	if (debug_level < 2) {
247		return;
248	}
249
250	objectsize = size_object;
251
252	if (objectsize == 0) {
253		debuglog("object is size 0\n");
254	} else {
255		if (objectsize > MAXOBJECTSIZE) {
256			debuglog("Object of size %d being clamped"
257			    "to size %d\n", objectsize, MAXOBJECTSIZE);
258			objectsize = MAXOBJECTSIZE;
259		}
260
261		if (hbuff != NULL) {
262			if (size_hbuff < objectsize*2+1) {
263				debuglog("Hbuff not large enough."
264				    "  Increase size\n");
265			} else {
266				for(i=0;i<objectsize;i++) {
267					sprintf(hbuff+i*2,"%02x",*(object+i));
268				}
269				*(hbuff+i*2) = '\0';
270			}
271		}
272
273		if (cbuff != NULL) {
274			if (size_cbuff < objectsize+1) {
275				debuglog("Cbuff not large enough."
276				    "  Increase Size\n");
277			}
278
279			for(i=0;i<objectsize;i++) {
280				if (*(object+i) >= 32 && *(object+i) <= 127) {
281					*(cbuff+i) = *(object+i);
282				} else {
283					*(cbuff+i) = '.';
284				}
285			}
286			*(cbuff+i) = '\0';
287		}
288	}
289}
290
291void
292dump_netobj(const struct netobj *nobj)
293{
294	char hbuff[MAXBUFFERSIZE*2];
295	char cbuff[MAXBUFFERSIZE];
296
297	if (debug_level < 2) {
298		return;
299	}
300
301	if (nobj == NULL) {
302		debuglog("Null netobj pointer\n");
303	}
304	else if (nobj->n_len == 0) {
305		debuglog("Size zero netobj\n");
306	} else {
307		dump_static_object(nobj->n_bytes, nobj->n_len,
308		    hbuff, sizeof(hbuff), cbuff, sizeof(cbuff));
309		debuglog("netobj: len: %d  data: %s :::  %s\n",
310		    nobj->n_len, hbuff, cbuff);
311	}
312}
313
314/* #define DUMP_FILELOCK_VERBOSE */
315void
316dump_filelock(const struct file_lock *fl)
317{
318#ifdef DUMP_FILELOCK_VERBOSE
319	char hbuff[MAXBUFFERSIZE*2];
320	char cbuff[MAXBUFFERSIZE];
321#endif
322
323	if (debug_level < 2) {
324		return;
325	}
326
327	if (fl != NULL) {
328		debuglog("Dumping file lock structure @ %p\n", fl);
329
330#ifdef DUMP_FILELOCK_VERBOSE
331		dump_static_object((unsigned char *)&fl->filehandle,
332		    sizeof(fl->filehandle), hbuff, sizeof(hbuff),
333		    cbuff, sizeof(cbuff));
334		debuglog("Filehandle: %8s  :::  %8s\n", hbuff, cbuff);
335#endif
336
337		debuglog("Dumping nlm4_holder:\n"
338		    "exc: %x  svid: %x  offset:len %llx:%llx\n",
339		    fl->client.exclusive, fl->client.svid,
340		    fl->client.l_offset, fl->client.l_len);
341
342#ifdef DUMP_FILELOCK_VERBOSE
343		debuglog("Dumping client identity:\n");
344		dump_netobj(&fl->client.oh);
345
346		debuglog("Dumping client cookie:\n");
347		dump_netobj(&fl->client_cookie);
348
349		debuglog("nsm: %d  status: %d  flags: %d  locker: %d"
350		    "  fd:  %d\n", fl->nsm_status, fl->status,
351		    fl->flags, fl->locker, fl->fd);
352#endif
353	} else {
354		debuglog("NULL file lock structure\n");
355	}
356}
357
358void
359copy_nlm4_lock_to_nlm4_holder(src, exclusive, dest)
360	const struct nlm4_lock *src;
361	const bool_t exclusive;
362	struct nlm4_holder *dest;
363{
364
365	dest->exclusive = exclusive;
366	dest->oh.n_len = src->oh.n_len;
367	dest->oh.n_bytes = src->oh.n_bytes;
368	dest->svid = src->svid;
369	dest->l_offset = src->l_offset;
370	dest->l_len = src->l_len;
371}
372
373
374size_t
375strnlen(const char *s, size_t len)
376{
377    size_t n;
378
379    for (n = 0;  s[n] != 0 && n < len; n++)
380        ;
381    return n;
382}
383
384/*
385 * allocate_file_lock: Create a lock with the given parameters
386 */
387
388struct file_lock *
389allocate_file_lock(const netobj *lockowner, const netobj *matchcookie,
390		   const struct sockaddr *addr, const char *caller_name)
391{
392	struct file_lock *newfl;
393	size_t n;
394
395	/* Beware of rubbish input! */
396	n = strnlen(caller_name, SM_MAXSTRLEN);
397	if (n == SM_MAXSTRLEN) {
398		return NULL;
399	}
400
401	newfl = malloc(sizeof(*newfl) - sizeof(newfl->client_name) + n + 1);
402	if (newfl == NULL) {
403		return NULL;
404	}
405	bzero(newfl, sizeof(*newfl) - sizeof(newfl->client_name));
406	memcpy(newfl->client_name, caller_name, n);
407	newfl->client_name[n] = 0;
408
409	newfl->client.oh.n_bytes = malloc(lockowner->n_len);
410	if (newfl->client.oh.n_bytes == NULL) {
411		free(newfl);
412		return NULL;
413	}
414	newfl->client.oh.n_len = lockowner->n_len;
415	bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len);
416
417	newfl->client_cookie.n_bytes = malloc(matchcookie->n_len);
418	if (newfl->client_cookie.n_bytes == NULL) {
419		free(newfl->client.oh.n_bytes);
420		free(newfl);
421		return NULL;
422	}
423	newfl->client_cookie.n_len = matchcookie->n_len;
424	bcopy(matchcookie->n_bytes, newfl->client_cookie.n_bytes, matchcookie->n_len);
425
426	newfl->addr = malloc(addr->sa_len);
427	if (newfl->addr == NULL) {
428		free(newfl->client_cookie.n_bytes);
429		free(newfl->client.oh.n_bytes);
430		free(newfl);
431		return NULL;
432	}
433	memcpy(newfl->addr, addr, addr->sa_len);
434
435	return newfl;
436}
437
438/*
439 * file_file_lock: Force creation of a valid file lock
440 */
441void
442fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
443    const bool_t exclusive, const int32_t svid,
444    const u_int64_t offset, const u_int64_t len,
445    const int state, const int status, const int flags, const int blocking)
446{
447	bcopy(fh, &fl->filehandle, sizeof(fhandle_t));
448
449	fl->client.exclusive = exclusive;
450	fl->client.svid = svid;
451	fl->client.l_offset = offset;
452	fl->client.l_len = len;
453
454	fl->nsm_status = state;
455	fl->status = status;
456	fl->flags = flags;
457	fl->blocking = blocking;
458}
459
460/*
461 * deallocate_file_lock: Free all storage associated with a file lock
462 */
463void
464deallocate_file_lock(struct file_lock *fl)
465{
466	free(fl->addr);
467	free(fl->client.oh.n_bytes);
468	free(fl->client_cookie.n_bytes);
469	free(fl);
470}
471
472/*
473 * regions_overlap(): This function examines the two provided regions for
474 * overlap.
475 */
476int
477regions_overlap(start1, len1, start2, len2)
478	const u_int64_t start1, len1, start2, len2;
479{
480	u_int64_t d1,d2,d3,d4;
481	enum split_status result;
482
483	debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
484		 start1, len1, start2, len2);
485
486	result = region_compare(start1, len1, start2, len2,
487	    &d1, &d2, &d3, &d4);
488
489	debuglog("Exiting region overlap with val: %d\n",result);
490
491	if (result == SPL_DISJOINT) {
492		return 0;
493	} else {
494		return 1;
495	}
496
497	return (result);
498}
499
500/*
501 * region_compare(): Examine lock regions and split appropriately
502 *
503 * XXX: Fix 64 bit overflow problems
504 * XXX: Check to make sure I got *ALL* the cases.
505 * XXX: This DESPERATELY needs a regression test.
506 */
507enum split_status
508region_compare(starte, lene, startu, lenu,
509    start1, len1, start2, len2)
510	const u_int64_t starte, lene, startu, lenu;
511	u_int64_t *start1, *len1, *start2, *len2;
512{
513	/*
514	 * Please pay attention to the sequential exclusions
515	 * of the if statements!!!
516	 */
517	enum LFLAGS lflags;
518	enum RFLAGS rflags;
519	enum split_status retval;
520
521	retval = SPL_DISJOINT;
522
523	if (lene == 0 && lenu == 0) {
524		/* Examine left edge of locker */
525		lflags = LEDGE_INSIDE;
526		if (startu < starte) {
527			lflags = LEDGE_LEFT;
528		} else if (startu == starte) {
529			lflags = LEDGE_LBOUNDARY;
530		}
531
532		rflags = REDGE_RBOUNDARY; /* Both are infiinite */
533
534		if (lflags == LEDGE_INSIDE) {
535			*start1 = starte;
536			*len1 = startu - starte;
537		}
538
539		if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) {
540			retval = SPL_CONTAINED;
541		} else {
542			retval = SPL_LOCK1;
543		}
544	} else if (lene == 0 && lenu != 0) {
545		/* Established lock is infinite */
546		/* Examine left edge of unlocker */
547		lflags = LEDGE_INSIDE;
548		if (startu < starte) {
549			lflags = LEDGE_LEFT;
550		} else if (startu == starte) {
551			lflags = LEDGE_LBOUNDARY;
552		}
553
554		/* Examine right edge of unlocker */
555		if (startu + lenu < starte) {
556			/* Right edge of unlocker left of established lock */
557			rflags = REDGE_LEFT;
558			return SPL_DISJOINT;
559		} else if (startu + lenu == starte) {
560			/* Right edge of unlocker on start of established lock */
561			rflags = REDGE_LBOUNDARY;
562			return SPL_DISJOINT;
563		} else { /* Infinifty is right of finity */
564			/* Right edge of unlocker inside established lock */
565			rflags = REDGE_INSIDE;
566		}
567
568		if (lflags == LEDGE_INSIDE) {
569			*start1 = starte;
570			*len1 = startu - starte;
571			retval |= SPL_LOCK1;
572		}
573
574		if (rflags == REDGE_INSIDE) {
575			/* Create right lock */
576			*start2 = startu+lenu;
577			*len2 = 0;
578			retval |= SPL_LOCK2;
579		}
580	} else if (lene != 0 && lenu == 0) {
581		/* Unlocker is infinite */
582		/* Examine left edge of unlocker */
583		lflags = LEDGE_RIGHT;
584		if (startu < starte) {
585			lflags = LEDGE_LEFT;
586			retval = SPL_CONTAINED;
587			return retval;
588		} else if (startu == starte) {
589			lflags = LEDGE_LBOUNDARY;
590			retval = SPL_CONTAINED;
591			return retval;
592		} else if ((startu > starte) && (startu < starte + lene - 1)) {
593			lflags = LEDGE_INSIDE;
594		} else if (startu == starte + lene - 1) {
595			lflags = LEDGE_RBOUNDARY;
596		} else { /* startu > starte + lene -1 */
597			lflags = LEDGE_RIGHT;
598			return SPL_DISJOINT;
599		}
600
601		rflags = REDGE_RIGHT; /* Infinity is right of finity */
602
603		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
604			*start1 = starte;
605			*len1 = startu - starte;
606			retval |= SPL_LOCK1;
607			return retval;
608		}
609	} else {
610		/* Both locks are finite */
611
612		/* Examine left edge of unlocker */
613		lflags = LEDGE_RIGHT;
614		if (startu < starte) {
615			lflags = LEDGE_LEFT;
616		} else if (startu == starte) {
617			lflags = LEDGE_LBOUNDARY;
618		} else if ((startu > starte) && (startu < starte + lene - 1)) {
619			lflags = LEDGE_INSIDE;
620		} else if (startu == starte + lene - 1) {
621			lflags = LEDGE_RBOUNDARY;
622		} else { /* startu > starte + lene -1 */
623			lflags = LEDGE_RIGHT;
624			return SPL_DISJOINT;
625		}
626
627		/* Examine right edge of unlocker */
628		if (startu + lenu < starte) {
629			/* Right edge of unlocker left of established lock */
630			rflags = REDGE_LEFT;
631			return SPL_DISJOINT;
632		} else if (startu + lenu == starte) {
633			/* Right edge of unlocker on start of established lock */
634			rflags = REDGE_LBOUNDARY;
635			return SPL_DISJOINT;
636		} else if (startu + lenu < starte + lene) {
637			/* Right edge of unlocker inside established lock */
638			rflags = REDGE_INSIDE;
639		} else if (startu + lenu == starte + lene) {
640			/* Right edge of unlocker on right edge of established lock */
641			rflags = REDGE_RBOUNDARY;
642		} else { /* startu + lenu > starte + lene */
643			/* Right edge of unlocker is right of established lock */
644			rflags = REDGE_RIGHT;
645		}
646
647		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
648			/* Create left lock */
649			*start1 = starte;
650			*len1 = (startu - starte);
651			retval |= SPL_LOCK1;
652		}
653
654		if (rflags == REDGE_INSIDE) {
655			/* Create right lock */
656			*start2 = startu+lenu;
657			*len2 = starte+lene-(startu+lenu);
658			retval |= SPL_LOCK2;
659		}
660
661		if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) &&
662		    (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) {
663			retval = SPL_CONTAINED;
664		}
665	}
666	return retval;
667}
668
669/*
670 * same_netobj: Compares the apprpriate bits of a netobj for identity
671 */
672int
673same_netobj(const netobj *n0, const netobj *n1)
674{
675	int retval;
676
677	retval = 0;
678
679	debuglog("Entering netobj identity check\n");
680
681	if (n0->n_len == n1->n_len) {
682		debuglog("Preliminary length check passed\n");
683		retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len);
684		debuglog("netobj %smatch\n", retval ? "" : "mis");
685	}
686
687	return (retval);
688}
689
690/*
691 * same_filelock_identity: Compares the appropriate bits of a file_lock
692 */
693int
694same_filelock_identity(fl0, fl1)
695	const struct file_lock *fl0, *fl1;
696{
697	int retval;
698
699	retval = 0;
700
701	debuglog("Checking filelock identity\n");
702
703	/*
704	 * Check process ids and host information.
705	 */
706	retval = (fl0->client.svid == fl1->client.svid &&
707	    same_netobj(&(fl0->client.oh), &(fl1->client.oh)));
708
709	debuglog("Exiting checking filelock identity: retval: %d\n",retval);
710
711	return (retval);
712}
713
714/*
715 * Below here are routines associated with manipulating the NFS
716 * lock list.
717 */
718
719/*
720 * get_lock_matching_unlock: Return a lock which matches the given unlock lock
721 *                           or NULL otehrwise
722 * XXX: It is a shame that this duplicates so much code from test_nfslock.
723 */
724struct file_lock *
725get_lock_matching_unlock(const struct file_lock *fl)
726{
727	struct file_lock *ifl; /* Iterator */
728
729	debuglog("Entering lock_matching_unlock\n");
730	debuglog("********Dump of fl*****************\n");
731	dump_filelock(fl);
732
733	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
734		debuglog("Pointer to file lock: %p\n",ifl);
735
736		debuglog("****Dump of ifl****\n");
737		dump_filelock(ifl);
738		debuglog("*******************\n");
739
740		/*
741		 * XXX: It is conceivable that someone could use the NLM RPC
742		 * system to directly access filehandles.  This may be a
743		 * security hazard as the filehandle code may bypass normal
744		 * file access controls
745		 */
746		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
747			continue;
748
749		debuglog("matching_unlock: Filehandles match, "
750		    "checking regions\n");
751
752		/* Filehandles match, check for region overlap */
753		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
754			ifl->client.l_offset, ifl->client.l_len))
755			continue;
756
757		debuglog("matching_unlock: Region overlap"
758		    " found %llu : %llu -- %llu : %llu\n",
759		    fl->client.l_offset,fl->client.l_len,
760		    ifl->client.l_offset,ifl->client.l_len);
761
762		/* Regions overlap, check the identity */
763		if (!same_filelock_identity(fl,ifl))
764			continue;
765
766		debuglog("matching_unlock: Duplicate lock id.  Granting\n");
767		return (ifl);
768	}
769
770	debuglog("Exiting lock_matching_unlock\n");
771
772	return (NULL);
773}
774
775/*
776 * test_nfslock: check for NFS lock in lock list
777 *
778 * This routine makes the following assumptions:
779 *    1) Nothing will adjust the lock list during a lookup
780 *
781 * This routine has an intersting quirk which bit me hard.
782 * The conflicting_fl is the pointer to the conflicting lock.
783 * However, to modify the "*pointer* to the conflicting lock" rather
784 * that the "conflicting lock itself" one must pass in a "pointer to
785 * the pointer of the conflicting lock".  Gross.
786 */
787
788enum nfslock_status
789test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl)
790{
791	struct file_lock *ifl; /* Iterator */
792	enum nfslock_status retval;
793
794	debuglog("Entering test_nfslock\n");
795
796	retval = NFS_GRANTED;
797	(*conflicting_fl) = NULL;
798
799	debuglog("Entering lock search loop\n");
800
801	debuglog("***********************************\n");
802	debuglog("Dumping match filelock\n");
803	debuglog("***********************************\n");
804	dump_filelock(fl);
805	debuglog("***********************************\n");
806
807	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
808		if (retval == NFS_DENIED)
809			break;
810
811		debuglog("Top of lock loop\n");
812		debuglog("Pointer to file lock: %p\n",ifl);
813
814		debuglog("***********************************\n");
815		debuglog("Dumping test filelock\n");
816		debuglog("***********************************\n");
817		dump_filelock(ifl);
818		debuglog("***********************************\n");
819
820		/*
821		 * XXX: It is conceivable that someone could use the NLM RPC
822		 * system to directly access filehandles.  This may be a
823		 * security hazard as the filehandle code may bypass normal
824		 * file access controls
825		 */
826		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
827			continue;
828
829		debuglog("test_nfslock: filehandle match found\n");
830
831		/* Filehandles match, check for region overlap */
832		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
833			ifl->client.l_offset, ifl->client.l_len))
834			continue;
835
836		debuglog("test_nfslock: Region overlap found"
837		    " %llu : %llu -- %llu : %llu\n",
838		    fl->client.l_offset,fl->client.l_len,
839		    ifl->client.l_offset,ifl->client.l_len);
840
841		/* Regions overlap, check the exclusivity */
842		if (!(fl->client.exclusive || ifl->client.exclusive))
843			continue;
844
845		debuglog("test_nfslock: Exclusivity failure: %d %d\n",
846		    fl->client.exclusive,
847		    ifl->client.exclusive);
848
849		if (same_filelock_identity(fl,ifl)) {
850			debuglog("test_nfslock: Duplicate id.  Granting\n");
851			(*conflicting_fl) = ifl;
852			retval = NFS_GRANTED_DUPLICATE;
853		} else {
854			/* locking attempt fails */
855			debuglog("test_nfslock: Lock attempt failed\n");
856			debuglog("Desired lock\n");
857			dump_filelock(fl);
858			debuglog("Conflicting lock\n");
859			dump_filelock(ifl);
860			(*conflicting_fl) = ifl;
861			retval = NFS_DENIED;
862		}
863	}
864
865	debuglog("Dumping file locks\n");
866	debuglog("Exiting test_nfslock\n");
867
868	return (retval);
869}
870
871/*
872 * lock_nfslock: attempt to create a lock in the NFS lock list
873 *
874 * This routine tests whether the lock will be granted and then adds
875 * the entry to the lock list if so.
876 *
877 * Argument fl gets modified as its list housekeeping entries get modified
878 * upon insertion into the NFS lock list
879 *
880 * This routine makes several assumptions:
881 *    1) It is perfectly happy to grant a duplicate lock from the same pid.
882 *       While this seems to be intuitively wrong, it is required for proper
883 *       Posix semantics during unlock.  It is absolutely imperative to not
884 *       unlock the main lock before the two child locks are established. Thus,
885 *       one has be be able to create duplicate locks over an existing lock
886 *    2) It currently accepts duplicate locks from the same id,pid
887 */
888
889enum nfslock_status
890lock_nfslock(struct file_lock *fl)
891{
892	enum nfslock_status retval;
893	struct file_lock *dummy_fl;
894
895	dummy_fl = NULL;
896
897	debuglog("Entering lock_nfslock...\n");
898
899	retval = test_nfslock(fl,&dummy_fl);
900
901	if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) {
902		debuglog("Inserting lock...\n");
903		dump_filelock(fl);
904		LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist);
905	}
906
907	debuglog("Exiting lock_nfslock...\n");
908
909	return (retval);
910}
911
912/*
913 * delete_nfslock: delete an NFS lock list entry
914 *
915 * This routine is used to delete a lock out of the NFS lock list
916 * without regard to status, underlying locks, regions or anything else
917 *
918 * Note that this routine *does not deallocate memory* of the lock.
919 * It just disconnects it from the list.  The lock can then be used
920 * by other routines without fear of trashing the list.
921 */
922
923enum nfslock_status
924delete_nfslock(struct file_lock *fl)
925{
926
927	LIST_REMOVE(fl, nfslocklist);
928
929	return (NFS_GRANTED);
930}
931
932enum split_status
933split_nfslock(exist_lock, unlock_lock, left_lock, right_lock)
934	const struct file_lock *exist_lock, *unlock_lock;
935	struct file_lock **left_lock, **right_lock;
936{
937	u_int64_t start1, len1, start2, len2;
938	enum split_status spstatus;
939
940	spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len,
941	    unlock_lock->client.l_offset, unlock_lock->client.l_len,
942	    &start1, &len1, &start2, &len2);
943
944	if ((spstatus & SPL_LOCK1) != 0) {
945		*left_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie, exist_lock->addr, exist_lock->client_name);
946		if (*left_lock == NULL) {
947			debuglog("Unable to allocate resource for split 1\n");
948			return SPL_RESERR;
949		}
950
951		fill_file_lock(*left_lock, &exist_lock->filehandle,
952		    exist_lock->client.exclusive, exist_lock->client.svid,
953		    start1, len1,
954		    exist_lock->nsm_status,
955		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
956	}
957
958	if ((spstatus & SPL_LOCK2) != 0) {
959		*right_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie, exist_lock->addr, exist_lock->client_name);
960		if (*right_lock == NULL) {
961			debuglog("Unable to allocate resource for split 1\n");
962			if (*left_lock != NULL) {
963				deallocate_file_lock(*left_lock);
964			}
965			return SPL_RESERR;
966		}
967
968		fill_file_lock(*right_lock, &exist_lock->filehandle,
969		    exist_lock->client.exclusive, exist_lock->client.svid,
970		    start2, len2,
971		    exist_lock->nsm_status,
972		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
973	}
974
975	return spstatus;
976}
977
978enum nfslock_status
979unlock_nfslock(fl, released_lock, left_lock, right_lock)
980	const struct file_lock *fl;
981	struct file_lock **released_lock;
982	struct file_lock **left_lock;
983	struct file_lock **right_lock;
984{
985	struct file_lock *mfl; /* Matching file lock */
986	enum nfslock_status retval;
987	enum split_status spstatus;
988
989	debuglog("Entering unlock_nfslock\n");
990
991	*released_lock = NULL;
992	*left_lock = NULL;
993	*right_lock = NULL;
994
995	retval = NFS_DENIED_NOLOCK;
996
997	debuglog("Attempting to match lock...\n");
998	mfl = get_lock_matching_unlock(fl);
999
1000	if (mfl != NULL) {
1001		debuglog("Unlock matched.  Querying for split\n");
1002
1003		spstatus = split_nfslock(mfl, fl, left_lock, right_lock);
1004
1005		debuglog("Split returned %d %p %p %p %p\n",spstatus,mfl,fl,*left_lock,*right_lock);
1006		debuglog("********Split dumps********");
1007		dump_filelock(mfl);
1008		dump_filelock(fl);
1009		dump_filelock(*left_lock);
1010		dump_filelock(*right_lock);
1011		debuglog("********End Split dumps********");
1012
1013		if (spstatus == SPL_RESERR) {
1014			if (*left_lock != NULL) {
1015				deallocate_file_lock(*left_lock);
1016				*left_lock = NULL;
1017			}
1018
1019			if (*right_lock != NULL) {
1020				deallocate_file_lock(*right_lock);
1021				*right_lock = NULL;
1022			}
1023
1024			return NFS_RESERR;
1025		}
1026
1027		/* Insert new locks from split if required */
1028		if (*left_lock != NULL) {
1029			debuglog("Split left activated\n");
1030			LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist);
1031		}
1032
1033		if (*right_lock != NULL) {
1034			debuglog("Split right activated\n");
1035			LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist);
1036		}
1037
1038		/* Unlock the lock since it matches identity */
1039		LIST_REMOVE(mfl, nfslocklist);
1040		*released_lock = mfl;
1041		retval = NFS_GRANTED;
1042	}
1043
1044	debuglog("Exiting unlock_nfslock\n");
1045
1046	return retval;
1047}
1048
1049/*
1050 * Below here are the routines for manipulating the file lock directly
1051 * on the disk hardware itself
1052 */
1053enum hwlock_status
1054lock_hwlock(struct file_lock *fl)
1055{
1056	struct monfile *imf,*nmf;
1057	int lflags, flerror;
1058
1059	/* Scan to see if filehandle already present */
1060	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
1061		if (bcmp(&fl->filehandle, &imf->filehandle,
1062			sizeof(fl->filehandle)) == 0) {
1063			/* imf is the correct filehandle */
1064			break;
1065		}
1066	}
1067
1068	/*
1069	 * Filehandle already exists (we control the file)
1070	 * *AND* NFS has already cleared the lock for availability
1071	 * Grant it and bump the refcount.
1072	 */
1073	if (imf != NULL) {
1074		++(imf->refcount);
1075		return (HW_GRANTED);
1076	}
1077
1078	/* No filehandle found, create and go */
1079	nmf = malloc(sizeof(struct monfile));
1080	if (nmf == NULL) {
1081		debuglog("hwlock resource allocation failure\n");
1082		return (HW_RESERR);
1083	}
1084
1085	/* XXX: Is O_RDWR always the correct mode? */
1086	nmf->fd = fhopen(&fl->filehandle, O_RDWR);
1087	if (nmf->fd < 0) {
1088		debuglog("fhopen failed (from %16s): %32s\n",
1089		    fl->client_name, strerror(errno));
1090		free(nmf);
1091		switch (errno) {
1092		case ESTALE:
1093			return (HW_STALEFH);
1094		case EROFS:
1095			return (HW_READONLY);
1096		default:
1097			return (HW_RESERR);
1098		}
1099	}
1100
1101	/* File opened correctly, fill the monitor struct */
1102	bcopy(&fl->filehandle, &nmf->filehandle, sizeof(fl->filehandle));
1103	nmf->refcount = 1;
1104	nmf->exclusive = fl->client.exclusive;
1105
1106	lflags = (nmf->exclusive == 1) ?
1107	    (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);
1108
1109	flerror = flock(nmf->fd, lflags);
1110
1111	if (flerror != 0) {
1112		debuglog("flock failed (from %16s): %32s\n",
1113		    fl->client_name, strerror(errno));
1114		close(nmf->fd);
1115		free(nmf);
1116		switch (errno) {
1117		case EAGAIN:
1118			return (HW_DENIED);
1119		case ESTALE:
1120			return (HW_STALEFH);
1121		case EROFS:
1122			return (HW_READONLY);
1123		default:
1124			return (HW_RESERR);
1125			break;
1126		}
1127	}
1128
1129	/* File opened and locked */
1130	LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);
1131
1132	debuglog("flock succeeded (from %16s)\n", fl->client_name);
1133	return (HW_GRANTED);
1134}
1135
1136enum hwlock_status
1137unlock_hwlock(const struct file_lock *fl)
1138{
1139	struct monfile *imf;
1140
1141	debuglog("Entering unlock_hwlock\n");
1142	debuglog("Entering loop interation\n");
1143
1144	/* Scan to see if filehandle already present */
1145	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
1146		if (bcmp(&fl->filehandle, &imf->filehandle,
1147			sizeof(fl->filehandle)) == 0) {
1148			/* imf is the correct filehandle */
1149			break;
1150		}
1151	}
1152
1153	debuglog("Completed iteration.  Proceeding\n");
1154
1155	if (imf == NULL) {
1156		/* No lock found */
1157		debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
1158		return (HW_DENIED_NOLOCK);
1159	}
1160
1161	/* Lock found */
1162	--imf->refcount;
1163
1164	if (imf->refcount < 0) {
1165		debuglog("Negative hardware reference count\n");
1166	}
1167
1168	if (imf->refcount <= 0) {
1169		close(imf->fd);
1170		LIST_REMOVE(imf, monfilelist);
1171		free(imf);
1172	}
1173	debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
1174	return (HW_GRANTED);
1175}
1176
1177enum hwlock_status
1178test_hwlock(fl, conflicting_fl)
1179	const struct file_lock *fl __unused;
1180	struct file_lock **conflicting_fl __unused;
1181{
1182
1183	/*
1184	 * XXX: lock tests on hardware are not required until
1185	 * true partial file testing is done on the underlying file
1186	 */
1187	return (HW_RESERR);
1188}
1189
1190
1191
1192/*
1193 * Below here are routines for manipulating blocked lock requests
1194 * They should only be called from the XXX_partialfilelock routines
1195 * if at all possible
1196 */
1197
1198void
1199add_blockingfilelock(struct file_lock *fl)
1200{
1201
1202	debuglog("Entering add_blockingfilelock\n");
1203
1204	/*
1205	 * Clear the blocking flag so that it can be reused without
1206	 * adding it to the blocking queue a second time
1207	 */
1208
1209	fl->blocking = 0;
1210	LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);
1211
1212	debuglog("Exiting add_blockingfilelock\n");
1213}
1214
1215void
1216remove_blockingfilelock(struct file_lock *fl)
1217{
1218
1219	debuglog("Entering remove_blockingfilelock\n");
1220
1221	LIST_REMOVE(fl, nfslocklist);
1222
1223	debuglog("Exiting remove_blockingfilelock\n");
1224}
1225
1226void
1227clear_blockingfilelock(const char *hostname)
1228{
1229	struct file_lock *ifl,*nfl;
1230
1231	/*
1232	 * Normally, LIST_FOREACH is called for, but since
1233	 * the current element *is* the iterator, deleting it
1234	 * would mess up the iteration.  Thus, a next element
1235	 * must be used explicitly
1236	 */
1237
1238	ifl = LIST_FIRST(&blockedlocklist_head);
1239
1240	while (ifl != NULL) {
1241		nfl = LIST_NEXT(ifl, nfslocklist);
1242
1243		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
1244			remove_blockingfilelock(ifl);
1245			deallocate_file_lock(ifl);
1246		}
1247
1248		ifl = nfl;
1249	}
1250}
1251
1252void
1253retry_blockingfilelocklist(void)
1254{
1255	/* Retry all locks in the blocked list */
1256	struct file_lock *ifl, *nfl; /* Iterator */
1257	enum partialfilelock_status pflstatus;
1258
1259	debuglog("Entering retry_blockingfilelocklist\n");
1260
1261	LIST_FOREACH_SAFE(ifl, &blockedlocklist_head, nfslocklist, nfl) {
1262		debuglog("Iterator choice %p\n",ifl);
1263		debuglog("Next iterator choice %p\n",nfl);
1264
1265		/*
1266		 * SUBTLE BUG: The file_lock must be removed from the
1267		 * old list so that it's list pointers get disconnected
1268		 * before being allowed to participate in the new list
1269		 * which will automatically add it in if necessary.
1270		 */
1271
1272		LIST_REMOVE(ifl, nfslocklist);
1273		pflstatus = lock_partialfilelock(ifl);
1274
1275		if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
1276			debuglog("Granted blocked lock\n");
1277			/* lock granted and is now being used */
1278			send_granted(ifl,0);
1279		} else {
1280			/* Reinsert lock back into blocked list */
1281			debuglog("Replacing blocked lock\n");
1282			LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist);
1283		}
1284	}
1285
1286	debuglog("Exiting retry_blockingfilelocklist\n");
1287}
1288
1289/*
1290 * Below here are routines associated with manipulating all
1291 * aspects of the partial file locking system (list, hardware, etc.)
1292 */
1293
1294/*
1295 * Please note that lock monitoring must be done at this level which
1296 * keeps track of *individual* lock requests on lock and unlock
1297 *
1298 * XXX: Split unlocking is going to make the unlock code miserable
1299 */
1300
1301/*
1302 * lock_partialfilelock:
1303 *
1304 * Argument fl gets modified as its list housekeeping entries get modified
1305 * upon insertion into the NFS lock list
1306 *
1307 * This routine makes several assumptions:
1308 * 1) It (will) pass locks through to flock to lock the entire underlying file
1309 *     and then parcel out NFS locks if it gets control of the file.
1310 *         This matches the old rpc.lockd file semantics (except where it
1311 *         is now more correct).  It is the safe solution, but will cause
1312 *         overly restrictive blocking if someone is trying to use the
1313 *         underlying files without using NFS.  This appears to be an
1314 *         acceptable tradeoff since most people use standalone NFS servers.
1315 * XXX: The right solution is probably kevent combined with fcntl
1316 *
1317 *    2) Nothing modifies the lock lists between testing and granting
1318 *           I have no idea whether this is a useful assumption or not
1319 */
1320
1321enum partialfilelock_status
1322lock_partialfilelock(struct file_lock *fl)
1323{
1324	enum partialfilelock_status retval;
1325	enum nfslock_status lnlstatus;
1326	enum hwlock_status hwstatus;
1327
1328	debuglog("Entering lock_partialfilelock\n");
1329
1330	retval = PFL_DENIED;
1331
1332	/*
1333	 * Execute the NFS lock first, if possible, as it is significantly
1334	 * easier and less expensive to undo than the filesystem lock
1335	 */
1336
1337	lnlstatus = lock_nfslock(fl);
1338
1339	switch (lnlstatus) {
1340	case NFS_GRANTED:
1341	case NFS_GRANTED_DUPLICATE:
1342		/*
1343		 * At this point, the NFS lock is allocated and active.
1344		 * Remember to clean it up if the hardware lock fails
1345		 */
1346		hwstatus = lock_hwlock(fl);
1347
1348		switch (hwstatus) {
1349		case HW_GRANTED:
1350		case HW_GRANTED_DUPLICATE:
1351			debuglog("HW GRANTED\n");
1352			/*
1353			 * XXX: Fixme: Check hwstatus for duplicate when
1354			 * true partial file locking and accounting is
1355			 * done on the hardware
1356			 */
1357			if (lnlstatus == NFS_GRANTED_DUPLICATE) {
1358				retval = PFL_GRANTED_DUPLICATE;
1359			} else {
1360				retval = PFL_GRANTED;
1361			}
1362			monitor_lock_host(fl->client_name);
1363			break;
1364		case HW_RESERR:
1365			debuglog("HW RESERR\n");
1366			retval = PFL_HWRESERR;
1367			break;
1368		case HW_DENIED:
1369			debuglog("HW DENIED\n");
1370			retval = PFL_HWDENIED;
1371			break;
1372		default:
1373			debuglog("Unmatched hwstatus %d\n",hwstatus);
1374			break;
1375		}
1376
1377		if (retval != PFL_GRANTED &&
1378		    retval != PFL_GRANTED_DUPLICATE) {
1379			/* Clean up the NFS lock */
1380			debuglog("Deleting trial NFS lock\n");
1381			delete_nfslock(fl);
1382		}
1383		break;
1384	case NFS_DENIED:
1385		retval = PFL_NFSDENIED;
1386		break;
1387	case NFS_RESERR:
1388		retval = PFL_NFSRESERR;
1389	default:
1390		debuglog("Unmatched lnlstatus %d\n");
1391		retval = PFL_NFSDENIED_NOLOCK;
1392		break;
1393	}
1394
1395	/*
1396	 * By the time fl reaches here, it is completely free again on
1397	 * failure.  The NFS lock done before attempting the
1398	 * hardware lock has been backed out
1399	 */
1400
1401	if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) {
1402		/* Once last chance to check the lock */
1403		if (fl->blocking == 1) {
1404			if (retval == PFL_NFSDENIED) {
1405				/* Queue the lock */
1406				debuglog("BLOCKING LOCK RECEIVED\n");
1407				retval = PFL_NFSBLOCKED;
1408				add_blockingfilelock(fl);
1409				dump_filelock(fl);
1410			} else {
1411				/* retval is okay as PFL_HWDENIED */
1412				debuglog("BLOCKING LOCK DENIED IN HARDWARE\n");
1413				dump_filelock(fl);
1414			}
1415		} else {
1416			/* Leave retval alone, it's already correct */
1417			debuglog("Lock denied.  Non-blocking failure\n");
1418			dump_filelock(fl);
1419		}
1420	}
1421
1422	debuglog("Exiting lock_partialfilelock\n");
1423
1424	return retval;
1425}
1426
1427/*
1428 * unlock_partialfilelock:
1429 *
1430 * Given a file_lock, unlock all locks which match.
1431 *
1432 * Note that a given lock might have to unlock ITSELF!  See
1433 * clear_partialfilelock for example.
1434 */
1435
1436enum partialfilelock_status
1437unlock_partialfilelock(const struct file_lock *fl)
1438{
1439	struct file_lock *lfl,*rfl,*releasedfl,*selffl;
1440	enum partialfilelock_status retval;
1441	enum nfslock_status unlstatus;
1442	enum hwlock_status unlhwstatus, lhwstatus;
1443
1444	debuglog("Entering unlock_partialfilelock\n");
1445
1446	selffl = NULL;
1447	lfl = NULL;
1448	rfl = NULL;
1449	releasedfl = NULL;
1450	retval = PFL_DENIED;
1451
1452	/*
1453	 * There are significant overlap and atomicity issues
1454	 * with partially releasing a lock.  For example, releasing
1455	 * part of an NFS shared lock does *not* always release the
1456	 * corresponding part of the file since there is only one
1457	 * rpc.lockd UID but multiple users could be requesting it
1458	 * from NFS.  Also, an unlock request should never allow
1459	 * another process to gain a lock on the remaining parts.
1460	 * ie. Always apply the new locks before releasing the
1461	 * old one
1462	 */
1463
1464	/*
1465	 * Loop is required since multiple little locks
1466	 * can be allocated and then deallocated with one
1467	 * big unlock.
1468	 *
1469	 * The loop is required to be here so that the nfs &
1470	 * hw subsystems do not need to communicate with one
1471	 * one another
1472	 */
1473
1474	do {
1475		debuglog("Value of releasedfl: %p\n",releasedfl);
1476		/* lfl&rfl are created *AND* placed into the NFS lock list if required */
1477		unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl);
1478		debuglog("Value of releasedfl: %p\n",releasedfl);
1479
1480
1481		/* XXX: This is grungy.  It should be refactored to be cleaner */
1482		if (lfl != NULL) {
1483			lhwstatus = lock_hwlock(lfl);
1484			if (lhwstatus != HW_GRANTED &&
1485			    lhwstatus != HW_GRANTED_DUPLICATE) {
1486				debuglog("HW duplicate lock failure for left split\n");
1487			}
1488			monitor_lock_host(lfl->client_name);
1489		}
1490
1491		if (rfl != NULL) {
1492			lhwstatus = lock_hwlock(rfl);
1493			if (lhwstatus != HW_GRANTED &&
1494			    lhwstatus != HW_GRANTED_DUPLICATE) {
1495				debuglog("HW duplicate lock failure for right split\n");
1496			}
1497			monitor_lock_host(rfl->client_name);
1498		}
1499
1500		switch (unlstatus) {
1501		case NFS_GRANTED:
1502			/* Attempt to unlock on the hardware */
1503			debuglog("NFS unlock granted.  Attempting hardware unlock\n");
1504
1505			/* This call *MUST NOT* unlock the two newly allocated locks */
1506			unlhwstatus = unlock_hwlock(fl);
1507			debuglog("HW unlock returned with code %d\n",unlhwstatus);
1508
1509			switch (unlhwstatus) {
1510			case HW_GRANTED:
1511				debuglog("HW unlock granted\n");
1512				unmonitor_lock_host(releasedfl->client_name);
1513				retval = PFL_GRANTED;
1514				break;
1515			case HW_DENIED_NOLOCK:
1516				/* Huh?!?!  This shouldn't happen */
1517				debuglog("HW unlock denied no lock\n");
1518				retval = PFL_HWRESERR;
1519				/* Break out of do-while */
1520				unlstatus = NFS_RESERR;
1521				break;
1522			default:
1523				debuglog("HW unlock failed\n");
1524				retval = PFL_HWRESERR;
1525				/* Break out of do-while */
1526				unlstatus = NFS_RESERR;
1527				break;
1528			}
1529
1530			debuglog("Exiting with status retval: %d\n",retval);
1531
1532			retry_blockingfilelocklist();
1533			break;
1534		case NFS_DENIED_NOLOCK:
1535			retval = PFL_GRANTED;
1536			debuglog("All locks cleaned out\n");
1537			break;
1538		default:
1539			retval = PFL_NFSRESERR;
1540			debuglog("NFS unlock failure\n");
1541			dump_filelock(fl);
1542			break;
1543		}
1544
1545		if (releasedfl != NULL) {
1546			if (fl == releasedfl) {
1547				/*
1548				 * XXX: YECHHH!!! Attempt to unlock self succeeded
1549				 * but we can't deallocate the space yet.  This is what
1550				 * happens when you don't write malloc and free together
1551				 */
1552				debuglog("Attempt to unlock self\n");
1553				selffl = releasedfl;
1554			} else {
1555				/*
1556				 * XXX: this deallocation *still* needs to migrate closer
1557				 * to the allocation code way up in get_lock or the allocation
1558				 * code needs to migrate down (violation of "When you write
1559				 * malloc you must write free")
1560				 */
1561
1562				deallocate_file_lock(releasedfl);
1563			}
1564		}
1565
1566	} while (unlstatus == NFS_GRANTED);
1567
1568	if (selffl != NULL) {
1569		/*
1570		 * This statement wipes out the incoming file lock (fl)
1571		 * in spite of the fact that it is declared const
1572		 */
1573		debuglog("WARNING!  Destroying incoming lock pointer\n");
1574		deallocate_file_lock(selffl);
1575	}
1576
1577	debuglog("Exiting unlock_partialfilelock\n");
1578
1579	return retval;
1580}
1581
1582/*
1583 * clear_partialfilelock
1584 *
1585 * Normally called in response to statd state number change.
1586 * Wipe out all locks held by a host.  As a bonus, the act of
1587 * doing so should automatically clear their statd entries and
1588 * unmonitor the host.
1589 */
1590
1591void
1592clear_partialfilelock(const char *hostname)
1593{
1594	struct file_lock *ifl, *nfl;
1595
1596	/* Clear blocking file lock list */
1597	clear_blockingfilelock(hostname);
1598
1599	/* do all required unlocks */
1600	/* Note that unlock can smash the current pointer to a lock */
1601
1602	/*
1603	 * Normally, LIST_FOREACH is called for, but since
1604	 * the current element *is* the iterator, deleting it
1605	 * would mess up the iteration.  Thus, a next element
1606	 * must be used explicitly
1607	 */
1608
1609	ifl = LIST_FIRST(&nfslocklist_head);
1610
1611	while (ifl != NULL) {
1612		nfl = LIST_NEXT(ifl, nfslocklist);
1613
1614		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
1615			/* Unlock destroys ifl out from underneath */
1616			unlock_partialfilelock(ifl);
1617			/* ifl is NO LONGER VALID AT THIS POINT */
1618		}
1619		ifl = nfl;
1620	}
1621}
1622
1623/*
1624 * test_partialfilelock:
1625 */
1626enum partialfilelock_status
1627test_partialfilelock(const struct file_lock *fl,
1628    struct file_lock **conflicting_fl)
1629{
1630	enum partialfilelock_status retval;
1631	enum nfslock_status teststatus;
1632
1633	debuglog("Entering testpartialfilelock...\n");
1634
1635	retval = PFL_DENIED;
1636
1637	teststatus = test_nfslock(fl, conflicting_fl);
1638	debuglog("test_partialfilelock: teststatus %d\n",teststatus);
1639
1640	if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) {
1641		/* XXX: Add the underlying filesystem locking code */
1642		retval = (teststatus == NFS_GRANTED) ?
1643		    PFL_GRANTED : PFL_GRANTED_DUPLICATE;
1644		debuglog("Dumping locks...\n");
1645		dump_filelock(fl);
1646		dump_filelock(*conflicting_fl);
1647		debuglog("Done dumping locks...\n");
1648	} else {
1649		retval = PFL_NFSDENIED;
1650		debuglog("NFS test denied.\n");
1651		dump_filelock(fl);
1652		debuglog("Conflicting.\n");
1653		dump_filelock(*conflicting_fl);
1654	}
1655
1656	debuglog("Exiting testpartialfilelock...\n");
1657
1658	return retval;
1659}
1660
1661/*
1662 * Below here are routines associated with translating the partial file locking
1663 * codes into useful codes to send back to the NFS RPC messaging system
1664 */
1665
1666/*
1667 * These routines translate the (relatively) useful return codes back onto
1668 * the few return codes which the nlm subsystems wishes to trasmit
1669 */
1670
1671enum nlm_stats
1672do_test(struct file_lock *fl, struct file_lock **conflicting_fl)
1673{
1674	enum partialfilelock_status pfsret;
1675	enum nlm_stats retval;
1676
1677	debuglog("Entering do_test...\n");
1678
1679	pfsret = test_partialfilelock(fl,conflicting_fl);
1680
1681	switch (pfsret) {
1682	case PFL_GRANTED:
1683		debuglog("PFL test lock granted\n");
1684		dump_filelock(fl);
1685		dump_filelock(*conflicting_fl);
1686		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1687		break;
1688	case PFL_GRANTED_DUPLICATE:
1689		debuglog("PFL test lock granted--duplicate id detected\n");
1690		dump_filelock(fl);
1691		dump_filelock(*conflicting_fl);
1692		debuglog("Clearing conflicting_fl for call semantics\n");
1693		*conflicting_fl = NULL;
1694		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1695		break;
1696	case PFL_NFSDENIED:
1697	case PFL_HWDENIED:
1698		debuglog("PFL test lock denied\n");
1699		dump_filelock(fl);
1700		dump_filelock(*conflicting_fl);
1701		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1702		break;
1703	case PFL_NFSRESERR:
1704	case PFL_HWRESERR:
1705		debuglog("PFL test lock resource fail\n");
1706		dump_filelock(fl);
1707		dump_filelock(*conflicting_fl);
1708		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1709		break;
1710	default:
1711		debuglog("PFL test lock *FAILED*\n");
1712		dump_filelock(fl);
1713		dump_filelock(*conflicting_fl);
1714		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1715		break;
1716	}
1717
1718	debuglog("Exiting do_test...\n");
1719
1720	return retval;
1721}
1722
1723/*
1724 * do_lock: Try to acquire a lock
1725 *
1726 * This routine makes a distinction between NLM versions.  I am pretty
1727 * convinced that this should be abstracted out and bounced up a level
1728 */
1729
1730enum nlm_stats
1731do_lock(struct file_lock *fl)
1732{
1733	enum partialfilelock_status pfsret;
1734	enum nlm_stats retval;
1735
1736	debuglog("Entering do_lock...\n");
1737
1738	pfsret = lock_partialfilelock(fl);
1739
1740	switch (pfsret) {
1741	case PFL_GRANTED:
1742		debuglog("PFL lock granted");
1743		dump_filelock(fl);
1744		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1745		break;
1746	case PFL_GRANTED_DUPLICATE:
1747		debuglog("PFL lock granted--duplicate id detected");
1748		dump_filelock(fl);
1749		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1750		break;
1751	case PFL_NFSDENIED:
1752	case PFL_HWDENIED:
1753		debuglog("PFL_NFS lock denied");
1754		dump_filelock(fl);
1755		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1756		break;
1757	case PFL_NFSBLOCKED:
1758	case PFL_HWBLOCKED:
1759		debuglog("PFL_NFS blocking lock denied.  Queued.\n");
1760		dump_filelock(fl);
1761		retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked;
1762		break;
1763	case PFL_NFSRESERR:
1764	case PFL_HWRESERR:
1765		debuglog("PFL lock resource alocation fail\n");
1766		dump_filelock(fl);
1767		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1768		break;
1769	default:
1770		debuglog("PFL lock *FAILED*");
1771		dump_filelock(fl);
1772		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1773		break;
1774	}
1775
1776	debuglog("Exiting do_lock...\n");
1777
1778	return retval;
1779}
1780
1781enum nlm_stats
1782do_unlock(struct file_lock *fl)
1783{
1784	enum partialfilelock_status pfsret;
1785	enum nlm_stats retval;
1786
1787	debuglog("Entering do_unlock...\n");
1788	pfsret = unlock_partialfilelock(fl);
1789
1790	switch (pfsret) {
1791	case PFL_GRANTED:
1792		debuglog("PFL unlock granted");
1793		dump_filelock(fl);
1794		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1795		break;
1796	case PFL_NFSDENIED:
1797	case PFL_HWDENIED:
1798		debuglog("PFL_NFS unlock denied");
1799		dump_filelock(fl);
1800		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1801		break;
1802	case PFL_NFSDENIED_NOLOCK:
1803	case PFL_HWDENIED_NOLOCK:
1804		debuglog("PFL_NFS no lock found\n");
1805		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1806		break;
1807	case PFL_NFSRESERR:
1808	case PFL_HWRESERR:
1809		debuglog("PFL unlock resource failure");
1810		dump_filelock(fl);
1811		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1812		break;
1813	default:
1814		debuglog("PFL unlock *FAILED*");
1815		dump_filelock(fl);
1816		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1817		break;
1818	}
1819
1820	debuglog("Exiting do_unlock...\n");
1821
1822	return retval;
1823}
1824
1825/*
1826 * do_clear
1827 *
1828 * This routine is non-existent because it doesn't have a return code.
1829 * It is here for completeness in case someone *does* need to do return
1830 * codes later.  A decent compiler should optimize this away.
1831 */
1832
1833void
1834do_clear(const char *hostname)
1835{
1836
1837	clear_partialfilelock(hostname);
1838}
1839
1840/*
1841 * The following routines are all called from the code which the
1842 * RPC layer invokes
1843 */
1844
1845/*
1846 * testlock(): inform the caller if the requested lock would be granted
1847 *
1848 * returns NULL if lock would granted
1849 * returns pointer to a conflicting nlm4_holder if not
1850 */
1851
1852struct nlm4_holder *
1853testlock(struct nlm4_lock *lock, bool_t exclusive, int flags __unused)
1854{
1855	struct file_lock test_fl, *conflicting_fl;
1856
1857	bzero(&test_fl, sizeof(test_fl));
1858
1859	bcopy(lock->fh.n_bytes, &(test_fl.filehandle), sizeof(fhandle_t));
1860	copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client);
1861
1862	siglock();
1863	do_test(&test_fl, &conflicting_fl);
1864
1865	if (conflicting_fl == NULL) {
1866		debuglog("No conflicting lock found\n");
1867		sigunlock();
1868		return NULL;
1869	} else {
1870		debuglog("Found conflicting lock\n");
1871		dump_filelock(conflicting_fl);
1872		sigunlock();
1873		return (&conflicting_fl->client);
1874	}
1875}
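
/*
 * Illustrative sketch, not part of the original source: how an NLMv4 test
 * handler might consume testlock()'s result.  The function name and the
 * LOCKD_EXAMPLES guard are hypothetical; cookie and XDR handling are
 * elided.
 */
#ifdef LOCKD_EXAMPLES
static nlm4_stats
example_test_handler(nlm4_testargs *args)
{
	struct nlm4_holder *holder;

	/* NULL means no conflicting lock exists; the lock would be granted. */
	holder = testlock(&args->alock, args->exclusive, LOCK_V4);
	if (holder == NULL)
		return (nlm4_granted);
	/* Otherwise *holder describes the conflicting lock for the reply. */
	return (nlm4_denied);
}
#endif /* LOCKD_EXAMPLES */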
1876
1877/*
1878 * getlock: try to acquire the lock.
1879 * If file is already locked and we can sleep, put the lock in the list with
1880 * status LKST_WAITING; it'll be processed later.
1881 * Otherwise try to lock. If we're allowed to block, fork a child which
1882 * will do the blocking lock.
1883 */
1884
1885enum nlm_stats
1886getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags)
1887{
1888	struct file_lock *newfl;
1889	enum nlm_stats retval;
1890
1891	debuglog("Entering getlock...\n");
1892
1893	if (grace_expired == 0 && lckarg->reclaim == 0)
1894		return (flags & LOCK_V4) ?
1895		    nlm4_denied_grace_period : nlm_denied_grace_period;
1896
1897	/* allocate new file_lock for this request */
1898	newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->cookie,
1899				   (struct sockaddr *)svc_getrpccaller(rqstp->rq_xprt)->buf, lckarg->alock.caller_name);
1900	if (newfl == NULL) {
1901		syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno));
1902		/* failed */
1903		return (flags & LOCK_V4) ?
1904		    nlm4_denied_nolocks : nlm_denied_nolocks;
1905	}
1906
1907	if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) {
1908		debuglog("received fhandle size %d, local size %d",
1909		    lckarg->alock.fh.n_len, (int)sizeof(fhandle_t));
1910	}
1911
1912	fill_file_lock(newfl, (fhandle_t *)lckarg->alock.fh.n_bytes,
1913	    lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset,
1914	    lckarg->alock.l_len,
1915	    lckarg->state, 0, flags, lckarg->block);
1916
1917	/*
1918	 * newfl is now fully constructed and deallocate_file_lock
1919	 * can now be used to delete it
1920	 */
1921
1922	siglock();
1923	debuglog("Pointer to new lock is %p\n", newfl);
1924
1925	retval = do_lock(newfl);
1926
1927	debuglog("Pointer to new lock after do_lock is %p\n", newfl);
1928	sigunlock();
1929
1930	switch (retval) {
1931	case nlm4_granted:
1932		/* case nlm_granted: is the same as nlm4_granted */
1933		/* do_mon(lckarg->alock.caller_name); */
1934		break;
1935	case nlm4_blocked:
1936		/* case nlm_blocked: is the same as nlm4_blocked */
1937		/* do_mon(lckarg->alock.caller_name); */
1938		break;
1939	default:
1940		/* denied or error: the new entry was never linked in; free it */
1941		deallocate_file_lock(newfl);
1942		break;
1943	}
1944
1945	debuglog("Exiting getlock...\n");
1946
1947	return retval;
1948}
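
/*
 * Illustrative sketch, not part of the original source: the shape of an
 * RPC-layer caller of getlock().  The handler name and LOCKD_EXAMPLES
 * guard are hypothetical; cookie handling and reply transmission are
 * elided.
 */
#ifdef LOCKD_EXAMPLES
static void
example_lock_handler(nlm4_lockargs *args, struct svc_req *rqstp,
    nlm4_res *result)
{
	/* getlock() copies everything it needs out of args. */
	result->stat.stat = (nlm4_stats)getlock(args, rqstp, LOCK_V4);
	/*
	 * An nlm4_blocked reply is provisional: the lock has been queued
	 * and the client is answered later via send_granted() below.
	 */
}
#endif /* LOCKD_EXAMPLES */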
1949
1950
1951/* unlock a filehandle */
1952enum nlm_stats
1953unlock(nlm4_lock *lock, const int flags __unused)
1954{
1955	struct file_lock fl;
1956	enum nlm_stats err;
1957
1958	siglock();
1959
1960	debuglog("Entering unlock...\n");
1961
1962	bzero(&fl,sizeof(struct file_lock));
1963	bcopy(lock->fh.n_bytes, &fl.filehandle, sizeof(fhandle_t));
1964
1965	copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client);
1966
1967	err = do_unlock(&fl);
1968
1969	sigunlock();
1970
1971	debuglog("Exiting unlock...\n");
1972
1973	return err;
1974}
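
/*
 * Illustrative sketch, not part of the original source: an RPC-layer
 * caller of unlock().  Unlike getlock(), nothing is allocated here;
 * unlock() builds its file_lock on the stack.  The handler name and
 * LOCKD_EXAMPLES guard are hypothetical.
 */
#ifdef LOCKD_EXAMPLES
static void
example_unlock_handler(nlm4_unlockargs *args, nlm4_res *result)
{
	result->stat.stat = (nlm4_stats)unlock(&args->alock, LOCK_V4);
}
#endif /* LOCKD_EXAMPLES */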
1975
1976/*
1977 * XXX: The following monitor/unmonitor routines
1978 * have not been extensively tested (i.e., no regression
1979 * script exists for them as it does for the locking sections).
1980 */
1981
1982/*
1983 * monitor_lock_host: monitor lock hosts locally with a ref count and
1984 * inform statd
1985 */
1986void
1987monitor_lock_host(const char *hostname)
1988{
1989	struct host *ihp, *nhp;
1990	struct mon smon;
1991	struct sm_stat_res sres;
1992	int rpcret, statflag;
1993	size_t n;
1994
1995	rpcret = 0;
1996	statflag = 0;
1997
1998	LIST_FOREACH(ihp, &hostlst_head, hostlst) {
1999		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2000			/* Host is already monitored, bump refcount */
2001			++ihp->refcnt;
2002			/* Host should only be in the monitor list once */
2003			return;
2004		}
2005	}
2006
2007	/* Host is not yet monitored, add it */
2008	n = strnlen(hostname, SM_MAXSTRLEN);
2009	if (n == SM_MAXSTRLEN) {
2010		return;	/* hostname too long to monitor safely */
2011	}
2012	nhp = malloc(sizeof(*nhp) - sizeof(nhp->name) + n + 1);
2013	if (nhp == NULL) {
2014		debuglog("Unable to allocate entry for statd mon\n");
2015		return;
2016	}
2017
2018	/* Allocated new host entry, now fill the fields */
2019	memcpy(nhp->name, hostname, n);
2020	nhp->name[n] = 0;
2021	nhp->refcnt = 1;
2022	debuglog("Locally monitoring host %16s\n", hostname);
2023
2024	debuglog("Attempting to tell statd\n");
2025
2026	bzero(&smon,sizeof(smon));
2027
2028	smon.mon_id.mon_name = nhp->name;
2029	smon.mon_id.my_id.my_name = "localhost";
2030	smon.mon_id.my_id.my_prog = NLM_PROG;
2031	smon.mon_id.my_id.my_vers = NLM_SM;
2032	smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;
2033
2034	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON,
2035	    (xdrproc_t)xdr_mon, &smon,
2036	    (xdrproc_t)xdr_sm_stat_res, &sres);
2037
2038	if (rpcret == 0) {
2039		if (sres.res_stat == stat_fail) {
2040			debuglog("Statd call failed\n");
2041			statflag = 0;
2042		} else {
2043			statflag = 1;
2044		}
2045	} else {
2046		debuglog("RPC call to statd failed with return value: %d\n",
2047		    rpcret);
2048		statflag = 0;
2049	}
2050
2051	if (statflag == 1) {
2052		LIST_INSERT_HEAD(&hostlst_head, nhp, hostlst);
2053	} else {
2054		free(nhp);
2055	}
2056
2057}
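
/*
 * Illustrative sketch, not part of the original source: the intended
 * pairing of monitor_lock_host() and unmonitor_lock_host() over a client
 * host's lock lifetime.  The helper below is hypothetical.
 */
#ifdef LOCKD_EXAMPLES
static void
example_host_lifetime(struct file_lock *fl)
{
	/*
	 * First lock from a host: an entry is created with refcnt 1 and
	 * SM_MON is sent to statd; later calls just bump the refcount.
	 */
	monitor_lock_host(fl->client_name);

	/* ... the host acquires and releases locks ... */

	/* Last reference dropped: refcnt hits 0 and SM_UNMON is sent. */
	unmonitor_lock_host(fl->client_name);
}
#endif /* LOCKD_EXAMPLES */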
2058
2059/*
2060 * unmonitor_lock_host: clear monitor ref counts and inform statd when gone
2061 */
2062void
2063unmonitor_lock_host(char *hostname)
2064{
2065	struct host *ihp;
2066	struct mon_id smon_id;
2067	struct sm_stat smstat;
2068	int rpcret;
2069
2070	rpcret = 0;
2071
2072	for (ihp = LIST_FIRST(&hostlst_head); ihp != NULL;
2073	     ihp = LIST_NEXT(ihp, hostlst)) {
2074		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2075			/* Host is monitored, drop the refcount */
2076			--ihp->refcnt;
2077			/* Host should only be in the monitor list once */
2078			break;
2079		}
2080	}
2081
2082	if (ihp == NULL) {
2083		debuglog("Could not find host %16s in mon list\n", hostname);
2084		return;
2085	}
2086
2087	if (ihp->refcnt > 0)
2088		return;
2089
2090	if (ihp->refcnt < 0) {
2091		debuglog("Negative refcount!: %d\n",
2092		    ihp->refcnt);
2093	}
2094
2095	debuglog("Attempting to unmonitor host %16s\n", hostname);
2096
2097	bzero(&smon_id,sizeof(smon_id));
2098
2099	smon_id.mon_name = hostname;
2100	smon_id.my_id.my_name = "localhost";
2101	smon_id.my_id.my_prog = NLM_PROG;
2102	smon_id.my_id.my_vers = NLM_SM;
2103	smon_id.my_id.my_proc = NLM_SM_NOTIFY;
2104
2105	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON,
2106	    (xdrproc_t)xdr_mon_id, &smon_id,
2107	    (xdrproc_t)xdr_sm_stat, &smstat);
2108
2109	if (rpcret != 0) {
2110		debuglog("RPC call to unmonitor statd failed with "
2111		    "return value: %d\n", rpcret);
2112	}
2113
2114	LIST_REMOVE(ihp, hostlst);
2115	free(ihp);
2116}
2117
2118/*
2119 * notify: Clear all locks from a host if statd complains
2120 *
2121 * XXX: This routine has not been thoroughly tested.  However, neither
2122 * had the old one been.  It used to compare the statd crash state counter
2123 * to the current lock state.  The upshot of this was that it basically
2124 * cleared all locks from the specified host 99% of the time (with the
2125 * other 1% being a bug).  Consequently, the assumption is that clearing
2126 * all locks from a host when notified by statd is acceptable.
2127 *
2128 * Please note that this routine skips the usual level of redirection
2129 * through a do_* type routine.  This introduces a possible source of
2130 * error; it might be better rewritten as do_notify, with this routine
2131 * removed.
2132 */
2133
2134void
2135notify(const char *hostname, const int state)
2136{
2137	debuglog("notify from %s, new state %d", hostname, state);
2138
2139	siglock();
2140	do_clear(hostname);
2141	sigunlock();
2142
2143	debuglog("Leaving notify\n");
2144}
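
/*
 * Illustrative sketch, not part of the original source: the RPC glue that
 * statd's crash callback reaches (my_proc was set to NLM_SM_NOTIFY in
 * monitor_lock_host() above) would unpack the host name and state counter
 * and call notify().  The handler name is hypothetical.
 */
#ifdef LOCKD_EXAMPLES
static void
example_sm_notify_glue(const char *mon_name, int state)
{
	/* statd reports the rebooted host and its new state number. */
	notify(mon_name, state);
}
#endif /* LOCKD_EXAMPLES */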
2145
2146void
2147send_granted(struct file_lock *fl, int opcode __unused)
2150{
2151	CLIENT *cli;
2152	static char dummy;
2153	struct timeval timeo;
2154	int success;
2155	static struct nlm_res retval;
2156	static struct nlm4_res retval4;
2157
2158	debuglog("About to send granted on blocked lock\n");
2159
2160	cli = get_client(fl->addr,
2161	    (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS);
2162	if (cli == NULL) {
2163		syslog(LOG_NOTICE, "failed to get CLIENT for %s",
2164		    fl->client_name);
2165		/*
2166		 * We failed to notify the remote host that the lock has been
2167		 * granted.  The client will time out and retry, and the lock
2168		 * will be granted then.
2169		 */
2170		return;
2171	}
2172	timeo.tv_sec = 0;
2173	timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */
2174
2175	if (fl->flags & LOCK_V4) {
2176		static nlm4_testargs res;
2177		res.cookie = fl->client_cookie;
2178		res.exclusive = fl->client.exclusive;
2179		res.alock.caller_name = fl->client_name;
2180		res.alock.fh.n_len = sizeof(fhandle_t);
2181		res.alock.fh.n_bytes = (char*)&fl->filehandle;
2182		res.alock.oh = fl->client.oh;
2183		res.alock.svid = fl->client.svid;
2184		res.alock.l_offset = fl->client.l_offset;
2185		res.alock.l_len = fl->client.l_len;
2186		debuglog("sending v4 reply%s",
2187			 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2188		if (fl->flags & LOCK_ASYNC) {
2189			success = clnt_call(cli, NLM4_GRANTED_MSG,
2190			    (xdrproc_t)xdr_nlm4_testargs, &res,
2191			    (xdrproc_t)xdr_void, &dummy, timeo);
2192		} else {
2193			success = clnt_call(cli, NLM4_GRANTED,
2194			    (xdrproc_t)xdr_nlm4_testargs, &res,
2195			    (xdrproc_t)xdr_nlm4_res, &retval4, timeo);
2196		}
2197	} else {
2198		static nlm_testargs res;
2199
2200		res.cookie = fl->client_cookie;
2201		res.exclusive = fl->client.exclusive;
2202		res.alock.caller_name = fl->client_name;
2203		res.alock.fh.n_len = sizeof(fhandle_t);
2204		res.alock.fh.n_bytes = (char*)&fl->filehandle;
2205		res.alock.oh = fl->client.oh;
2206		res.alock.svid = fl->client.svid;
2207		res.alock.l_offset = fl->client.l_offset;
2208		res.alock.l_len = fl->client.l_len;
2209		debuglog("sending v1 reply%s",
2210			 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2211		if (fl->flags & LOCK_ASYNC) {
2212			success = clnt_call(cli, NLM_GRANTED_MSG,
2213			    (xdrproc_t)xdr_nlm_testargs, &res,
2214			    (xdrproc_t)xdr_void, &dummy, timeo);
2215		} else {
2216			success = clnt_call(cli, NLM_GRANTED,
2217			    (xdrproc_t)xdr_nlm_testargs, &res,
2218			    (xdrproc_t)xdr_nlm_res, &retval, timeo);
2219		}
2220	}
2221	if (debug_level > 2)
2222		debuglog("clnt_call returns %d(%s) for granted",
2223			 success, clnt_sperrno(success));
2224
2225}
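
/*
 * Illustrative sketch, not part of the original source: send_granted() is
 * called once a previously blocked lock can finally be granted, e.g. by
 * the code that retries the blocked-lock list.  The helper below is a
 * hypothetical simplification of that pattern.
 */
#ifdef LOCKD_EXAMPLES
static void
example_retry_blocked(struct file_lock *ifl)
{
	/* nlm_granted and nlm4_granted share the same value */
	if (do_lock(ifl) == nlm4_granted)
		send_granted(ifl, 0);	/* opcode is unused */
}
#endif /* LOCKD_EXAMPLES */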
2226
2227/*
2228 * Routines below here have not been modified in the overhaul
2229 */
2230
2231/*
2232 * Are these two routines still required since lockd is not spawning off
2233 * children to service locks anymore?  Presumably they were originally
2234 * put in place to prevent one child from changing the lock list out
2235 * from under another one.
2236 */
2237
2238void
2239siglock(void)
2240{
2241	sigset_t block;
2242
2243	sigemptyset(&block);
2244	sigaddset(&block, SIGCHLD);
2245
2246	if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) {
2247		syslog(LOG_WARNING, "siglock failed: %s", strerror(errno));
2248	}
2249}
2250
2251void
2252sigunlock(void)
2253{
2254	sigset_t block;
2255
2256	sigemptyset(&block);
2257	sigaddset(&block, SIGCHLD);
2258
2259	if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) {
2260		syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno));
2261	}
2262}
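
/*
 * Illustrative sketch, not part of the original source: siglock() and
 * sigunlock() bracket any traversal or mutation of the shared lock lists
 * so a SIGCHLD handler cannot run mid-update, as getlock() and unlock()
 * above demonstrate.  The helper and LOCKD_EXAMPLES guard are
 * hypothetical.
 */
#ifdef LOCKD_EXAMPLES
static void
example_critical_section(void)
{
	siglock();		/* block SIGCHLD around list updates */
	/* ... inspect or modify the lock lists here ... */
	sigunlock();		/* deliver any pending SIGCHLD */
}
#endif /* LOCKD_EXAMPLES */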
2263
2264
2265