lockd_lock.c revision 92975
1/* $NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $ */ 2/* $FreeBSD: head/usr.sbin/rpc.lockd/lockd_lock.c 92975 2002-03-22 19:57:09Z alfred $ */ 3 4/* 5 * Copyright (c) 2001 Andrew P. Lentvorski, Jr. 6 * Copyright (c) 2000 Manuel Bouyer. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 */ 37 38#define LOCKD_DEBUG 39 40#include <stdio.h> 41#ifdef LOCKD_DEBUG 42#include <stdarg.h> 43#endif 44#include <stdlib.h> 45#include <unistd.h> 46#include <fcntl.h> 47#include <syslog.h> 48#include <errno.h> 49#include <string.h> 50#include <signal.h> 51#include <rpc/rpc.h> 52#include <sys/types.h> 53#include <sys/stat.h> 54#include <sys/socket.h> 55#include <sys/param.h> 56#include <sys/mount.h> 57#include <sys/wait.h> 58#include <rpcsvc/sm_inter.h> 59#include <rpcsvc/nlm_prot.h> 60#include "lockd_lock.h" 61#include "lockd.h" 62 63#define MAXOBJECTSIZE 64 64#define MAXBUFFERSIZE 1024 65 66/* 67 * SM_MAXSTRLEN is usually 1024. This means that lock requests and 68 * host name monitoring entries are *MUCH* larger than they should be 69 */ 70 71/* 72 * A set of utilities for managing file locking 73 * 74 * XXX: All locks are in a linked list, a better structure should be used 75 * to improve search/access effeciency. 
76 */ 77 78/* struct describing a lock */ 79struct file_lock { 80 LIST_ENTRY(file_lock) nfslocklist; 81 fhandle_t filehandle; /* NFS filehandle */ 82 struct sockaddr *addr; 83 struct nlm4_holder client; /* lock holder */ 84 /* XXX: client_cookie used *only* in send_granted */ 85 netobj client_cookie; /* cookie sent by the client */ 86 char client_name[SM_MAXSTRLEN]; 87 int nsm_status; /* status from the remote lock manager */ 88 int status; /* lock status, see below */ 89 int flags; /* lock flags, see lockd_lock.h */ 90 int blocking; /* blocking lock or not */ 91 pid_t locker; /* pid of the child process trying to get the lock */ 92 int fd; /* file descriptor for this lock */ 93}; 94 95LIST_HEAD(nfslocklist_head, file_lock); 96struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head); 97 98LIST_HEAD(blockedlocklist_head, file_lock); 99struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head); 100 101/* lock status */ 102#define LKST_LOCKED 1 /* lock is locked */ 103/* XXX: Is this flag file specific or lock specific? */ 104#define LKST_WAITING 2 /* file is already locked by another host */ 105#define LKST_PROCESSING 3 /* child is trying to aquire the lock */ 106#define LKST_DYING 4 /* must dies when we get news from the child */ 107 108/* struct describing a monitored host */ 109struct host { 110 LIST_ENTRY(host) hostlst; 111 char name[SM_MAXSTRLEN]; 112 int refcnt; 113}; 114/* list of hosts we monitor */ 115LIST_HEAD(hostlst_head, host); 116struct hostlst_head hostlst_head = LIST_HEAD_INITIALIZER(hostlst_head); 117 118/* 119 * File monitoring handlers 120 * XXX: These might be able to be removed when kevent support 121 * is placed into the hardware lock/unlock routines. (ie. 
122 * let the kernel do all the file monitoring) 123 */ 124 125/* Struct describing a monitored file */ 126struct monfile { 127 LIST_ENTRY(monfile) monfilelist; 128 fhandle_t filehandle; /* Local access filehandle */ 129 int fd; /* file descriptor: remains open until unlock! */ 130 int refcount; 131 int exclusive; 132}; 133 134/* List of files we monitor */ 135LIST_HEAD(monfilelist_head, monfile); 136struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head); 137 138static int debugdelay = 0; 139 140enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE, 141 NFS_DENIED, NFS_DENIED_NOLOCK, 142 NFS_RESERR }; 143 144enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE, 145 HW_DENIED, HW_DENIED_NOLOCK, 146 HW_STALEFH, HW_READONLY, HW_RESERR }; 147 148enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED, 149 PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR, 150 PFL_HWDENIED, PFL_HWBLOCKED, PFL_HWDENIED_NOLOCK, PFL_HWRESERR}; 151 152enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT}; 153enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT}; 154/* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM! 
SPLIT IT APART INTO TWO */ 155enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8}; 156 157enum partialfilelock_status lock_partialfilelock(struct file_lock *fl); 158 159void send_granted(struct file_lock *fl, int opcode); 160void siglock(void); 161void sigunlock(void); 162void monitor_lock_host(const char *hostname); 163void unmonitor_lock_host(const char *hostname); 164 165void copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src, 166 const bool_t exclusive, struct nlm4_holder *dest); 167struct file_lock * allocate_file_lock(const netobj *lockowner, 168 const netobj *matchcookie); 169void deallocate_file_lock(struct file_lock *fl); 170void fill_file_lock(struct file_lock *fl, const fhandle_t *fh, 171 struct sockaddr *addr, const bool_t exclusive, const int32_t svid, 172 const u_int64_t offset, const u_int64_t len, const char *caller_name, 173 const int state, const int status, const int flags, const int blocking); 174int regions_overlap(const u_int64_t start1, const u_int64_t len1, 175 const u_int64_t start2, const u_int64_t len2);; 176enum split_status region_compare(const u_int64_t starte, const u_int64_t lene, 177 const u_int64_t startu, const u_int64_t lenu, 178 u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2); 179int same_netobj(const netobj *n0, const netobj *n1); 180int same_filelock_identity(const struct file_lock *fl0, 181 const struct file_lock *fl2); 182 183static void debuglog(char const *fmt, ...); 184void dump_static_object(const unsigned char* object, const int sizeof_object, 185 unsigned char* hbuff, const int sizeof_hbuff, 186 unsigned char* cbuff, const int sizeof_cbuff); 187void dump_netobj(const struct netobj *nobj); 188void dump_filelock(const struct file_lock *fl); 189struct file_lock * get_lock_matching_unlock(const struct file_lock *fl); 190enum nfslock_status test_nfslock(const struct file_lock *fl, 191 struct file_lock **conflicting_fl); 192enum nfslock_status 
lock_nfslock(struct file_lock *fl); 193enum nfslock_status delete_nfslock(struct file_lock *fl); 194enum nfslock_status unlock_nfslock(const struct file_lock *fl, 195 struct file_lock **released_lock, struct file_lock **left_lock, 196 struct file_lock **right_lock); 197enum hwlock_status lock_hwlock(struct file_lock *fl); 198enum split_status split_nfslock(const struct file_lock *exist_lock, 199 const struct file_lock *unlock_lock, struct file_lock **left_lock, 200 struct file_lock **right_lock); 201void add_blockingfilelock(struct file_lock *fl); 202enum hwlock_status unlock_hwlock(const struct file_lock *fl); 203enum hwlock_status test_hwlock(const struct file_lock *fl, 204 struct file_lock **conflicting_fl); 205void remove_blockingfilelock(struct file_lock *fl); 206void clear_blockingfilelock(const char *hostname); 207void retry_blockingfilelocklist(void); 208enum partialfilelock_status unlock_partialfilelock( 209 const struct file_lock *fl); 210void clear_partialfilelock(const char *hostname); 211enum partialfilelock_status test_partialfilelock( 212 const struct file_lock *fl, struct file_lock **conflicting_fl); 213enum nlm_stats do_test(struct file_lock *fl, 214 struct file_lock **conflicting_fl); 215enum nlm_stats do_unlock(struct file_lock *fl); 216enum nlm_stats do_lock(struct file_lock *fl); 217void do_clear(const char *hostname); 218 219 220void 221debuglog(char const *fmt, ...) 
222{ 223 va_list ap; 224 225 if (debug_level < 1) { 226 return; 227 } 228 229 sleep(debugdelay); 230 231 va_start(ap, fmt); 232 vsyslog(LOG_DEBUG, fmt, ap); 233 va_end(ap); 234} 235 236void 237dump_static_object(object, size_object, hbuff, size_hbuff, cbuff, size_cbuff) 238 const unsigned char *object; 239 const int size_object; 240 unsigned char *hbuff; 241 const int size_hbuff; 242 unsigned char *cbuff; 243 const int size_cbuff; 244{ 245 int i, objectsize; 246 247 if (debug_level < 2) { 248 return; 249 } 250 251 objectsize = size_object; 252 253 if (objectsize == 0) { 254 debuglog("object is size 0\n"); 255 } else { 256 if (objectsize > MAXOBJECTSIZE) { 257 debuglog("Object of size %d being clamped" 258 "to size %d\n", objectsize, MAXOBJECTSIZE); 259 objectsize = MAXOBJECTSIZE; 260 } 261 262 if (hbuff != NULL) { 263 if (size_hbuff < objectsize*2+1) { 264 debuglog("Hbuff not large enough." 265 " Increase size\n"); 266 } else { 267 for(i=0;i<objectsize;i++) { 268 sprintf(hbuff+i*2,"%02x",*(object+i)); 269 } 270 *(hbuff+i*2) = '\0'; 271 } 272 } 273 274 if (cbuff != NULL) { 275 if (size_cbuff < objectsize+1) { 276 debuglog("Cbuff not large enough." 
277 " Increase Size\n"); 278 } 279 280 for(i=0;i<objectsize;i++) { 281 if (*(object+i) >= 32 && *(object+i) <= 127) { 282 *(cbuff+i) = *(object+i); 283 } else { 284 *(cbuff+i) = '.'; 285 } 286 } 287 *(cbuff+i) = '\0'; 288 } 289 } 290} 291 292void 293dump_netobj(const struct netobj *nobj) 294{ 295 char hbuff[MAXBUFFERSIZE*2]; 296 char cbuff[MAXBUFFERSIZE]; 297 298 if (debug_level < 2) { 299 return; 300 } 301 302 if (nobj == NULL) { 303 debuglog("Null netobj pointer\n"); 304 } 305 else if (nobj->n_len == 0) { 306 debuglog("Size zero netobj\n"); 307 } else { 308 dump_static_object(nobj->n_bytes, nobj->n_len, 309 hbuff, sizeof(hbuff), cbuff, sizeof(cbuff)); 310 debuglog("netobj: len: %d data: %s ::: %s\n", 311 nobj->n_len, hbuff, cbuff); 312 } 313} 314 315/* #define DUMP_FILELOCK_VERBOSE */ 316void 317dump_filelock(const struct file_lock *fl) 318{ 319#ifdef DUMP_FILELOCK_VERBOSE 320 char hbuff[MAXBUFFERSIZE*2]; 321 char cbuff[MAXBUFFERSIZE]; 322#endif 323 324 if (debug_level < 2) { 325 return; 326 } 327 328 if (fl != NULL) { 329 debuglog("Dumping file lock structure @ %p\n", fl); 330 331#ifdef DUMP_FILELOCK_VERBOSE 332 dump_static_object((unsigned char *)&fl->filehandle, 333 sizeof(fl->filehandle), hbuff, sizeof(hbuff), 334 cbuff, sizeof(cbuff)); 335 debuglog("Filehandle: %8s ::: %8s\n", hbuff, cbuff); 336#endif 337 338 debuglog("Dumping nlm4_holder:\n" 339 "exc: %x svid: %x offset:len %llx:%llx\n", 340 fl->client.exclusive, fl->client.svid, 341 fl->client.l_offset, fl->client.l_len); 342 343#ifdef DUMP_FILELOCK_VERBOSE 344 debuglog("Dumping client identity:\n"); 345 dump_netobj(&fl->client.oh); 346 347 debuglog("Dumping client cookie:\n"); 348 dump_netobj(&fl->client_cookie); 349 350 debuglog("nsm: %d status: %d flags: %d locker: %d" 351 " fd: %d\n", fl->nsm_status, fl->status, 352 fl->flags, fl->locker, fl->fd); 353#endif 354 } else { 355 debuglog("NULL file lock structure\n"); 356 } 357} 358 359void 360copy_nlm4_lock_to_nlm4_holder(src, exclusive, dest) 361 const 
struct nlm4_lock *src; 362 const bool_t exclusive; 363 struct nlm4_holder *dest; 364{ 365 366 dest->exclusive = exclusive; 367 dest->oh.n_len = src->oh.n_len; 368 dest->oh.n_bytes = src->oh.n_bytes; 369 dest->svid = src->svid; 370 dest->l_offset = src->l_offset; 371 dest->l_len = src->l_len; 372} 373 374 375/* 376 * allocate_file_lock: Create a lock with the given parameters 377 */ 378 379struct file_lock * 380allocate_file_lock(const netobj *lockowner, const netobj *matchcookie) 381{ 382 struct file_lock *newfl; 383 384 newfl = malloc(sizeof(struct file_lock)); 385 if (newfl == NULL) { 386 return NULL; 387 } 388 bzero(newfl, sizeof(newfl)); 389 390 newfl->client.oh.n_bytes = malloc(lockowner->n_len); 391 if (newfl->client.oh.n_bytes == NULL) { 392 free(newfl); 393 return NULL; 394 } 395 newfl->client.oh.n_len = lockowner->n_len; 396 bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len); 397 398 newfl->client_cookie.n_bytes = malloc(matchcookie->n_len); 399 if (newfl->client_cookie.n_bytes == NULL) { 400 free(newfl->client.oh.n_bytes); 401 free(newfl); 402 return NULL; 403 } 404 newfl->client_cookie.n_len = matchcookie->n_len; 405 bcopy(matchcookie->n_bytes, newfl->client_cookie.n_bytes, matchcookie->n_len); 406 407 return newfl; 408} 409 410/* 411 * file_file_lock: Force creation of a valid file lock 412 */ 413void 414fill_file_lock(struct file_lock *fl, const fhandle_t *fh, 415 struct sockaddr *addr, const bool_t exclusive, const int32_t svid, 416 const u_int64_t offset, const u_int64_t len, const char *caller_name, 417 const int state, const int status, const int flags, const int blocking) 418{ 419 bcopy(fh, &fl->filehandle, sizeof(fhandle_t)); 420 fl->addr = addr; 421 422 fl->client.exclusive = exclusive; 423 fl->client.svid = svid; 424 fl->client.l_offset = offset; 425 fl->client.l_len = len; 426 427 strncpy(fl->client_name, caller_name, SM_MAXSTRLEN); 428 429 fl->nsm_status = state; 430 fl->status = status; 431 fl->flags = flags; 432 
fl->blocking = blocking; 433} 434 435/* 436 * deallocate_file_lock: Free all storage associated with a file lock 437 */ 438void 439deallocate_file_lock(struct file_lock *fl) 440{ 441 free(fl->client.oh.n_bytes); 442 free(fl->client_cookie.n_bytes); 443 free(fl); 444} 445 446/* 447 * regions_overlap(): This function examines the two provided regions for 448 * overlap. 449 */ 450int 451regions_overlap(start1, len1, start2, len2) 452 const u_int64_t start1, len1, start2, len2; 453{ 454 u_int64_t d1,d2,d3,d4; 455 enum split_status result; 456 457 debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n", 458 start1, len1, start2, len2); 459 460 result = region_compare(start1, len1, start2, len2, 461 &d1, &d2, &d3, &d4); 462 463 debuglog("Exiting region overlap with val: %d\n",result); 464 465 if (result == SPL_DISJOINT) { 466 return 0; 467 } else { 468 return 1; 469 } 470 471 return (result); 472} 473 474/* 475 * region_compare(): Examine lock regions and split appropriately 476 * 477 * XXX: Fix 64 bit overflow problems 478 * XXX: Check to make sure I got *ALL* the cases. 479 * XXX: This DESPERATELY needs a regression test. 480 */ 481enum split_status 482region_compare(starte, lene, startu, lenu, 483 start1, len1, start2, len2) 484 const u_int64_t starte, lene, startu, lenu; 485 u_int64_t *start1, *len1, *start2, *len2; 486{ 487 /* 488 * Please pay attention to the sequential exclusions 489 * of the if statements!!! 
490 */ 491 enum LFLAGS lflags; 492 enum RFLAGS rflags; 493 enum split_status retval; 494 495 retval = SPL_DISJOINT; 496 497 if (lene == 0 && lenu == 0) { 498 /* Examine left edge of locker */ 499 if (startu < starte) { 500 lflags = LEDGE_LEFT; 501 } else if (startu == starte) { 502 lflags = LEDGE_LBOUNDARY; 503 } else { 504 lflags = LEDGE_INSIDE; 505 } 506 507 rflags = REDGE_RBOUNDARY; /* Both are infiinite */ 508 509 if (lflags == LEDGE_INSIDE) { 510 *start1 = starte; 511 *len1 = startu - starte; 512 } 513 514 if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) { 515 retval = SPL_CONTAINED; 516 } else { 517 retval = SPL_LOCK1; 518 } 519 } else if (lene == 0 && lenu != 0) { 520 /* Established lock is infinite */ 521 /* Examine left edge of unlocker */ 522 if (startu < starte) { 523 lflags = LEDGE_LEFT; 524 } else if (startu == starte) { 525 lflags = LEDGE_LBOUNDARY; 526 } else if (startu > starte) { 527 lflags = LEDGE_INSIDE; 528 } 529 530 /* Examine right edge of unlocker */ 531 if (startu + lenu < starte) { 532 /* Right edge of unlocker left of established lock */ 533 rflags = REDGE_LEFT; 534 return SPL_DISJOINT; 535 } else if (startu + lenu == starte) { 536 /* Right edge of unlocker on start of established lock */ 537 rflags = REDGE_LBOUNDARY; 538 return SPL_DISJOINT; 539 } else { /* Infinifty is right of finity */ 540 /* Right edge of unlocker inside established lock */ 541 rflags = REDGE_INSIDE; 542 } 543 544 if (lflags == LEDGE_INSIDE) { 545 *start1 = starte; 546 *len1 = startu - starte; 547 retval |= SPL_LOCK1; 548 } 549 550 if (rflags == REDGE_INSIDE) { 551 /* Create right lock */ 552 *start2 = startu+lenu; 553 *len2 = 0; 554 retval |= SPL_LOCK2; 555 } 556 } else if (lene != 0 && lenu == 0) { 557 /* Unlocker is infinite */ 558 /* Examine left edge of unlocker */ 559 if (startu < starte) { 560 lflags = LEDGE_LEFT; 561 retval = SPL_CONTAINED; 562 return retval; 563 } else if (startu == starte) { 564 lflags = LEDGE_LBOUNDARY; 565 retval = SPL_CONTAINED; 566 
return retval; 567 } else if ((startu > starte) && (startu < starte + lene - 1)) { 568 lflags = LEDGE_INSIDE; 569 } else if (startu == starte + lene - 1) { 570 lflags = LEDGE_RBOUNDARY; 571 } else { /* startu > starte + lene -1 */ 572 lflags = LEDGE_RIGHT; 573 return SPL_DISJOINT; 574 } 575 576 rflags = REDGE_RIGHT; /* Infinity is right of finity */ 577 578 if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) { 579 *start1 = starte; 580 *len1 = startu - starte; 581 retval |= SPL_LOCK1; 582 return retval; 583 } 584 585 } else { 586 /* Both locks are finite */ 587 588 /* Examine left edge of unlocker */ 589 if (startu < starte) { 590 lflags = LEDGE_LEFT; 591 } else if (startu == starte) { 592 lflags = LEDGE_LBOUNDARY; 593 } else if ((startu > starte) && (startu < starte + lene - 1)) { 594 lflags = LEDGE_INSIDE; 595 } else if (startu == starte + lene - 1) { 596 lflags = LEDGE_RBOUNDARY; 597 } else { /* startu > starte + lene -1 */ 598 lflags = LEDGE_RIGHT; 599 return SPL_DISJOINT; 600 } 601 602 /* Examine right edge of unlocker */ 603 if (startu + lenu < starte) { 604 /* Right edge of unlocker left of established lock */ 605 rflags = REDGE_LEFT; 606 return SPL_DISJOINT; 607 } else if (startu + lenu == starte) { 608 /* Right edge of unlocker on start of established lock */ 609 rflags = REDGE_LBOUNDARY; 610 return SPL_DISJOINT; 611 } else if (startu + lenu < starte + lene) { 612 /* Right edge of unlocker inside established lock */ 613 rflags = REDGE_INSIDE; 614 } else if (startu + lenu == starte + lene) { 615 /* Right edge of unlocker on right edge of established lock */ 616 rflags = REDGE_RBOUNDARY; 617 } else { /* startu + lenu > starte + lene */ 618 /* Right edge of unlocker is right of established lock */ 619 rflags = REDGE_RIGHT; 620 } 621 622 if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) { 623 /* Create left lock */ 624 *start1 = starte; 625 *len1 = (startu - starte); 626 retval |= SPL_LOCK1; 627 } 628 629 if (rflags == REDGE_INSIDE) { 630 /* Create 
right lock */ 631 *start2 = startu+lenu; 632 *len2 = starte+lene-(startu+lenu); 633 retval |= SPL_LOCK2; 634 } 635 636 if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) && 637 (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) { 638 retval = SPL_CONTAINED; 639 } 640 } 641 642 return retval; 643} 644 645/* 646 * same_netobj: Compares the apprpriate bits of a netobj for identity 647 */ 648int 649same_netobj(const netobj *n0, const netobj *n1) 650{ 651 int retval; 652 653 retval = 0; 654 655 debuglog("Entering netobj identity check\n"); 656 657 if (n0->n_len == n1->n_len) { 658 debuglog("Preliminary length check passed\n"); 659 retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len); 660 debuglog("netobj %smatch\n", retval ? "" : "mis"); 661 } 662 663 return (retval); 664} 665 666/* 667 * same_filelock_identity: Compares the appropriate bits of a file_lock 668 */ 669int 670same_filelock_identity(fl0, fl1) 671 const struct file_lock *fl0, *fl1; 672{ 673 int retval; 674 675 retval = 0; 676 677 debuglog("Checking filelock identity\n"); 678 679 /* 680 * Check process ids and host information. 681 */ 682 retval = (fl0->client.svid == fl1->client.svid && 683 same_netobj(&(fl0->client.oh), &(fl1->client.oh))); 684 685 debuglog("Exiting checking filelock identity: retval: %d\n",retval); 686 687 return (retval); 688} 689 690/* 691 * Below here are routines associated with manipulating the NFS 692 * lock list. 693 */ 694 695/* 696 * get_lock_matching_unlock: Return a lock which matches the given unlock lock 697 * or NULL otehrwise 698 * XXX: It is a shame that this duplicates so much code from test_nfslock. 
699 */ 700struct file_lock * 701get_lock_matching_unlock(const struct file_lock *fl) 702{ 703 struct file_lock *ifl; /* Iterator */ 704 705 debuglog("Entering lock_matching_unlock\n"); 706 debuglog("********Dump of fl*****************\n"); 707 dump_filelock(fl); 708 709 LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) { 710 debuglog("Pointer to file lock: %p\n",ifl); 711 712 debuglog("****Dump of ifl****\n"); 713 dump_filelock(ifl); 714 debuglog("*******************\n"); 715 716 /* 717 * XXX: It is conceivable that someone could use the NLM RPC 718 * system to directly access filehandles. This may be a 719 * security hazard as the filehandle code may bypass normal 720 * file access controls 721 */ 722 if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t))) 723 continue; 724 725 debuglog("matching_unlock: Filehandles match, " 726 "checking regions\n"); 727 728 /* Filehandles match, check for region overlap */ 729 if (!regions_overlap(fl->client.l_offset, fl->client.l_len, 730 ifl->client.l_offset, ifl->client.l_len)) 731 continue; 732 733 debuglog("matching_unlock: Region overlap" 734 " found %llu : %llu -- %llu : %llu\n", 735 fl->client.l_offset,fl->client.l_len, 736 ifl->client.l_offset,ifl->client.l_len); 737 738 /* Regions overlap, check the identity */ 739 if (!same_filelock_identity(fl,ifl)) 740 continue; 741 742 debuglog("matching_unlock: Duplicate lock id. Granting\n"); 743 return (ifl); 744 } 745 746 debuglog("Exiting lock_matching_unlock\n"); 747 748 return (NULL); 749} 750 751/* 752 * test_nfslock: check for NFS lock in lock list 753 * 754 * This routine makes the following assumptions: 755 * 1) Nothing will adjust the lock list during a lookup 756 * 757 * This routine has an intersting quirk which bit me hard. 758 * The conflicting_fl is the pointer to the conflicting lock. 
759 * However, to modify the "*pointer* to the conflicting lock" rather 760 * that the "conflicting lock itself" one must pass in a "pointer to 761 * the pointer of the conflicting lock". Gross. 762 */ 763 764enum nfslock_status 765test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl) 766{ 767 struct file_lock *ifl; /* Iterator */ 768 enum nfslock_status retval; 769 770 debuglog("Entering test_nfslock\n"); 771 772 retval = NFS_GRANTED; 773 (*conflicting_fl) = NULL; 774 775 debuglog("Entering lock search loop\n"); 776 777 debuglog("***********************************\n"); 778 debuglog("Dumping match filelock\n"); 779 debuglog("***********************************\n"); 780 dump_filelock(fl); 781 debuglog("***********************************\n"); 782 783 LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) { 784 if (retval == NFS_DENIED) 785 break; 786 787 debuglog("Top of lock loop\n"); 788 debuglog("Pointer to file lock: %p\n",ifl); 789 790 debuglog("***********************************\n"); 791 debuglog("Dumping test filelock\n"); 792 debuglog("***********************************\n"); 793 dump_filelock(ifl); 794 debuglog("***********************************\n"); 795 796 /* 797 * XXX: It is conceivable that someone could use the NLM RPC 798 * system to directly access filehandles. 
This may be a 799 * security hazard as the filehandle code may bypass normal 800 * file access controls 801 */ 802 if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t))) 803 continue; 804 805 debuglog("test_nfslock: filehandle match found\n"); 806 807 /* Filehandles match, check for region overlap */ 808 if (!regions_overlap(fl->client.l_offset, fl->client.l_len, 809 ifl->client.l_offset, ifl->client.l_len)) 810 continue; 811 812 debuglog("test_nfslock: Region overlap found" 813 " %llu : %llu -- %llu : %llu\n", 814 fl->client.l_offset,fl->client.l_len, 815 ifl->client.l_offset,ifl->client.l_len); 816 817 /* Regions overlap, check the exclusivity */ 818 if (!(fl->client.exclusive || ifl->client.exclusive)) 819 continue; 820 821 debuglog("test_nfslock: Exclusivity failure: %d %d\n", 822 fl->client.exclusive, 823 ifl->client.exclusive); 824 825 if (same_filelock_identity(fl,ifl)) { 826 debuglog("test_nfslock: Duplicate id. Granting\n"); 827 (*conflicting_fl) = ifl; 828 retval = NFS_GRANTED_DUPLICATE; 829 } else { 830 /* locking attempt fails */ 831 debuglog("test_nfslock: Lock attempt failed\n"); 832 debuglog("Desired lock\n"); 833 dump_filelock(fl); 834 debuglog("Conflicting lock\n"); 835 dump_filelock(ifl); 836 (*conflicting_fl) = ifl; 837 retval = NFS_DENIED; 838 } 839 } 840 841 debuglog("Dumping file locks\n"); 842 debuglog("Exiting test_nfslock\n"); 843 844 return (retval); 845} 846 847/* 848 * lock_nfslock: attempt to create a lock in the NFS lock list 849 * 850 * This routine tests whether the lock will be granted and then adds 851 * the entry to the lock list if so. 852 * 853 * Argument fl gets modified as its list housekeeping entries get modified 854 * upon insertion into the NFS lock list 855 * 856 * This routine makes several assumptions: 857 * 1) It is perfectly happy to grant a duplicate lock from the same pid. 858 * While this seems to be intuitively wrong, it is required for proper 859 * Posix semantics during unlock. 
It is absolutely imperative to not 860 * unlock the main lock before the two child locks are established. Thus, 861 * one has be be able to create duplicate locks over an existing lock 862 * 2) It currently accepts duplicate locks from the same id,pid 863 */ 864 865enum nfslock_status 866lock_nfslock(struct file_lock *fl) 867{ 868 enum nfslock_status retval; 869 struct file_lock *dummy_fl; 870 871 dummy_fl = NULL; 872 873 debuglog("Entering lock_nfslock...\n"); 874 875 retval = test_nfslock(fl,&dummy_fl); 876 877 if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) { 878 debuglog("Inserting lock...\n"); 879 dump_filelock(fl); 880 LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist); 881 } 882 883 debuglog("Exiting lock_nfslock...\n"); 884 885 return (retval); 886} 887 888/* 889 * delete_nfslock: delete an NFS lock list entry 890 * 891 * This routine is used to delete a lock out of the NFS lock list 892 * without regard to status, underlying locks, regions or anything else 893 * 894 * Note that this routine *does not deallocate memory* of the lock. 895 * It just disconnects it from the list. The lock can then be used 896 * by other routines without fear of trashing the list. 
897 */ 898 899enum nfslock_status 900delete_nfslock(struct file_lock *fl) 901{ 902 903 LIST_REMOVE(fl, nfslocklist); 904 905 return (NFS_GRANTED); 906} 907 908enum split_status 909split_nfslock(exist_lock, unlock_lock, left_lock, right_lock) 910 const struct file_lock *exist_lock, *unlock_lock; 911 struct file_lock **left_lock, **right_lock; 912{ 913 u_int64_t start1, len1, start2, len2; 914 enum split_status spstatus; 915 916 spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len, 917 unlock_lock->client.l_offset, unlock_lock->client.l_len, 918 &start1, &len1, &start2, &len2); 919 920 if ((spstatus & SPL_LOCK1) != 0) { 921 *left_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie); 922 if (*left_lock == NULL) { 923 debuglog("Unable to allocate resource for split 1\n"); 924 return SPL_RESERR; 925 } 926 927 fill_file_lock(*left_lock, &exist_lock->filehandle, 928 exist_lock->addr, 929 exist_lock->client.exclusive, exist_lock->client.svid, 930 start1, len1, 931 exist_lock->client_name, exist_lock->nsm_status, 932 exist_lock->status, exist_lock->flags, exist_lock->blocking); 933 } 934 935 if ((spstatus & SPL_LOCK2) != 0) { 936 *right_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie); 937 if (*right_lock == NULL) { 938 debuglog("Unable to allocate resource for split 1\n"); 939 if (*left_lock != NULL) { 940 deallocate_file_lock(*left_lock); 941 } 942 return SPL_RESERR; 943 } 944 945 fill_file_lock(*right_lock, &exist_lock->filehandle, 946 exist_lock->addr, 947 exist_lock->client.exclusive, exist_lock->client.svid, 948 start2, len2, 949 exist_lock->client_name, exist_lock->nsm_status, 950 exist_lock->status, exist_lock->flags, exist_lock->blocking); 951 } 952 953 return spstatus; 954} 955 956enum nfslock_status 957unlock_nfslock(fl, released_lock, left_lock, right_lock) 958 const struct file_lock *fl; 959 struct file_lock **released_lock; 960 struct file_lock **left_lock; 961 struct file_lock 
**right_lock; 962{ 963 struct file_lock *mfl; /* Matching file lock */ 964 enum nfslock_status retval; 965 enum split_status spstatus; 966 967 debuglog("Entering unlock_nfslock\n"); 968 969 *released_lock = NULL; 970 *left_lock = NULL; 971 *right_lock = NULL; 972 973 retval = NFS_DENIED_NOLOCK; 974 975 printf("Attempting to match lock...\n"); 976 mfl = get_lock_matching_unlock(fl); 977 978 if (mfl != NULL) { 979 debuglog("Unlock matched. Querying for split\n"); 980 981 spstatus = split_nfslock(mfl, fl, left_lock, right_lock); 982 983 debuglog("Split returned %d %p %p %p %p\n",spstatus,mfl,fl,*left_lock,*right_lock); 984 debuglog("********Split dumps********"); 985 dump_filelock(mfl); 986 dump_filelock(fl); 987 dump_filelock(*left_lock); 988 dump_filelock(*right_lock); 989 debuglog("********End Split dumps********"); 990 991 if (spstatus == SPL_RESERR) { 992 if (*left_lock != NULL) { 993 deallocate_file_lock(*left_lock); 994 *left_lock = NULL; 995 } 996 997 if (*right_lock != NULL) { 998 deallocate_file_lock(*right_lock); 999 *right_lock = NULL; 1000 } 1001 1002 return NFS_RESERR; 1003 } 1004 1005 /* Insert new locks from split if required */ 1006 if (*left_lock != NULL) { 1007 debuglog("Split left activated\n"); 1008 LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist); 1009 } 1010 1011 if (*right_lock != NULL) { 1012 debuglog("Split right activated\n"); 1013 LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist); 1014 } 1015 1016 /* Unlock the lock since it matches identity */ 1017 LIST_REMOVE(mfl, nfslocklist); 1018 *released_lock = mfl; 1019 retval = NFS_GRANTED; 1020 } 1021 1022 debuglog("Exiting unlock_nfslock\n"); 1023 1024 return retval; 1025} 1026 1027/* 1028 * Below here are the routines for manipulating the file lock directly 1029 * on the disk hardware itself 1030 */ 1031enum hwlock_status 1032lock_hwlock(struct file_lock *fl) 1033{ 1034 struct monfile *imf,*nmf; 1035 int lflags, flerror; 1036 1037 /* Scan to see if filehandle already 
present */
	/*
	 * lock_hwlock (continued): look up fl->filehandle among the files
	 * this daemon already holds open and flock()ed.  imf == NULL after
	 * the loop means the file is not yet under our control.
	 */
	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
		if (bcmp(&fl->filehandle, &imf->filehandle,
		    sizeof(fl->filehandle)) == 0) {
			/* imf is the correct filehandle */
			break;
		}
	}

	/*
	 * Filehandle already exists (we control the file)
	 * *AND* NFS has already cleared the lock for availability
	 * Grant it and bump the refcount.
	 */
	if (imf != NULL) {
		++(imf->refcount);
		return (HW_GRANTED);
	}

	/* No filehandle found, create and go */
	nmf = malloc(sizeof(struct monfile));
	if (nmf == NULL) {
		debuglog("hwlock resource allocation failure\n");
		return (HW_RESERR);
	}

	/* XXX: Is O_RDWR always the correct mode? */
	nmf->fd = fhopen(&fl->filehandle, O_RDWR);
	if (nmf->fd < 0) {
		debuglog("fhopen failed (from %16s): %32s\n",
		    fl->client_name, strerror(errno));
		free(nmf);
		/* Translate the open failure into an hwlock status code */
		switch (errno) {
		case ESTALE:
			return (HW_STALEFH);
		case EROFS:
			return (HW_READONLY);
		default:
			return (HW_RESERR);
		}
	}

	/* File opened correctly, fill the monitor struct */
	bcopy(&fl->filehandle, &nmf->filehandle, sizeof(fl->filehandle));
	nmf->refcount = 1;
	nmf->exclusive = fl->client.exclusive;

	/* Non-blocking flock: blocking semantics are handled at NFS level */
	lflags = (nmf->exclusive == 1) ?
	    (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);

	flerror = flock(nmf->fd, lflags);

	if (flerror != 0) {
		debuglog("flock failed (from %16s): %32s\n",
		    fl->client_name, strerror(errno));
		close(nmf->fd);
		free(nmf);
		/* Translate the flock failure into an hwlock status code */
		switch (errno) {
		case EAGAIN:
			return (HW_DENIED);
		case ESTALE:
			return (HW_STALEFH);
		case EROFS:
			return (HW_READONLY);
		default:
			return (HW_RESERR);
			break;
		}
	}

	/* File opened and locked */
	LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);

	debuglog("flock succeeded (from %16s)\n", fl->client_name);
	return (HW_GRANTED);
}

/*
 * unlock_hwlock: drop one reference on the flock covering the file named
 * by fl->filehandle.  The flock itself is released (and the monfile entry
 * closed and freed) only when the reference count reaches zero.
 */
enum hwlock_status
unlock_hwlock(const struct file_lock *fl)
{
	struct monfile *imf;	/* matching monitored-file entry, if any */

	debuglog("Entering unlock_hwlock\n");
	debuglog("Entering loop interation\n");

	/* Scan to see if filehandle already present */
	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
		if (bcmp(&fl->filehandle, &imf->filehandle,
		    sizeof(fl->filehandle)) == 0) {
			/* imf is the correct filehandle */
			break;
		}
	}

	debuglog("Completed iteration. Proceeding\n");

	if (imf == NULL) {
		/* No lock found */
		debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
		return (HW_DENIED_NOLOCK);
	}

	/* Lock found */
	--imf->refcount;

	/* A negative count means the refcount accounting is broken */
	if (imf->refcount < 0) {
		debuglog("Negative hardware reference count\n");
	}

	/* Last reference gone: release the flock and forget the file */
	if (imf->refcount <= 0) {
		close(imf->fd);
		LIST_REMOVE(imf, monfilelist);
		free(imf);
	}
	debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
	return (HW_GRANTED);
}

/*
 * test_hwlock: hardware-level "would this lock be granted" query.
 * Currently unimplemented; always reports a resource error.
 */
enum hwlock_status
test_hwlock(fl, conflicting_fl)
	const struct file_lock *fl __unused;
	struct file_lock **conflicting_fl __unused;
{

	/*
	 * XXX: lock tests on hardware are not required until
	 * true partial file testing is done on the underlying file
	 */
	return (HW_RESERR);
}



/*
 * Below here are routines for manipulating blocked lock requests
 * They should only be called from the XXX_partialfilelock routines
 * if at all possible
 */

/*
 * add_blockingfilelock: queue a denied blocking request so it can be
 * retried (by retry_blockingfilelocklist) when some lock is released.
 */
void
add_blockingfilelock(struct file_lock *fl)
{

	debuglog("Entering add_blockingfilelock\n");

	/*
	 * Clear the blocking flag so that it can be reused without
	 * adding it to the blocking queue a second time
	 */

	fl->blocking = 0;
	LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);

	debuglog("Exiting add_blockingfilelock\n");
}

/* remove_blockingfilelock: unhook fl from the blocked-request queue */
void
remove_blockingfilelock(struct file_lock *fl)
{

	debuglog("Entering remove_blockingfilelock\n");

	LIST_REMOVE(fl, nfslocklist);

	debuglog("Exiting remove_blockingfilelock\n");
}

/*
 * clear_blockingfilelock: remove and free every queued blocking request
 * that originated from the given host.
 */
void
clear_blockingfilelock(const char *hostname)
{
	struct file_lock *ifl,*nfl;

	/*
	 * Normally, LIST_FOREACH is called for, but since
	 * the current element *is* the iterator, deleting it
	 * would mess up the iteration.  Thus, a next element
	 * must be used explicitly
	 */

	ifl = LIST_FIRST(&blockedlocklist_head);

	while (ifl != NULL) {
		nfl = LIST_NEXT(ifl, nfslocklist);

		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
			remove_blockingfilelock(ifl);
			deallocate_file_lock(ifl);
		}

		ifl = nfl;
	}
}

/*
 * retry_blockingfilelocklist: re-attempt every queued blocking request.
 * Requests that are granted are announced to their client via
 * send_granted(); the rest are re-inserted at their former position.
 *
 * NOTE(review): the pfl "previous element" bookkeeping at the bottom of
 * the loop looks fragile (pfl is reset to the list head whenever its
 * successor is not nfl) -- verify retry order is really preserved on a
 * multi-element queue before relying on it.
 */
void
retry_blockingfilelocklist(void)
{
	/* Retry all locks in the blocked list */
	struct file_lock *ifl, *nfl, *pfl; /* Iterator */
	enum partialfilelock_status pflstatus;

	debuglog("Entering retry_blockingfilelocklist\n");

	pfl = NULL;
	ifl = LIST_FIRST(&blockedlocklist_head);
	debuglog("Iterator choice %p\n",ifl);

	while (ifl != NULL) {
		/*
		 * SUBTLE BUG: The next element must be worked out before the
		 * current element has been moved
		 */
		nfl = LIST_NEXT(ifl, nfslocklist);
		debuglog("Iterator choice %p\n",ifl);
		debuglog("Prev iterator choice %p\n",pfl);
		debuglog("Next iterator choice %p\n",nfl);

		/*
		 * SUBTLE BUG: The file_lock must be removed from the
		 * old list so that it's list pointers get disconnected
		 * before being allowed to participate in the new list
		 * which will automatically add it in if necessary.
		 */

		LIST_REMOVE(ifl, nfslocklist);
		pflstatus = lock_partialfilelock(ifl);

		if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
			debuglog("Granted blocked lock\n");
			/* lock granted and is now being used */
			send_granted(ifl,0);
		} else {
			/* Reinsert lock back into same place in blocked list */
			debuglog("Replacing blocked lock\n");
			if (pfl != NULL)
				LIST_INSERT_AFTER(pfl, ifl, nfslocklist);
			else
				/* ifl is the only elem. in the list */
				LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist);
		}

		/* Valid increment behavior regardless of state of ifl */
		ifl = nfl;
		/* if a lock was granted incrementing pfl would make it nfl */
		if (pfl != NULL && (LIST_NEXT(pfl, nfslocklist) != nfl))
			pfl = LIST_NEXT(pfl, nfslocklist);
		else
			pfl = LIST_FIRST(&blockedlocklist_head);
	}

	debuglog("Exiting retry_blockingfilelocklist\n");
}

/*
 * Below here are routines associated with manipulating all
 * aspects of the partial file locking system (list, hardware, etc.)
 */

/*
 * Please note that lock monitoring must be done at this level which
 * keeps track of *individual* lock requests on lock and unlock
 *
 * XXX: Split unlocking is going to make the unlock code miserable
 */

/*
 * lock_partialfilelock:
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 * 1) It (will) pass locks through to flock to lock the entire underlying file
 *    and then parcel out NFS locks if it gets control of the file.
 *    This matches the old rpc.lockd file semantics (except where it
 *    is now more correct).  It is the safe solution, but will cause
 *    overly restrictive blocking if someone is trying to use the
 *    underlying files without using NFS.  This appears to be an
 *    acceptable tradeoff since most people use standalone NFS servers.
1315 * XXX: The right solution is probably kevent combined with fcntl 1316 * 1317 * 2) Nothing modifies the lock lists between testing and granting 1318 * I have no idea whether this is a useful assumption or not 1319 */ 1320 1321enum partialfilelock_status 1322lock_partialfilelock(struct file_lock *fl) 1323{ 1324 enum partialfilelock_status retval; 1325 enum nfslock_status lnlstatus; 1326 enum hwlock_status hwstatus; 1327 1328 debuglog("Entering lock_partialfilelock\n"); 1329 1330 retval = PFL_DENIED; 1331 1332 /* 1333 * Execute the NFS lock first, if possible, as it is significantly 1334 * easier and less expensive to undo than the filesystem lock 1335 */ 1336 1337 lnlstatus = lock_nfslock(fl); 1338 1339 switch (lnlstatus) { 1340 case NFS_GRANTED: 1341 case NFS_GRANTED_DUPLICATE: 1342 /* 1343 * At this point, the NFS lock is allocated and active. 1344 * Remember to clean it up if the hardware lock fails 1345 */ 1346 hwstatus = lock_hwlock(fl); 1347 1348 switch (hwstatus) { 1349 case HW_GRANTED: 1350 case HW_GRANTED_DUPLICATE: 1351 debuglog("HW GRANTED\n"); 1352 /* 1353 * XXX: Fixme: Check hwstatus for duplicate when 1354 * true partial file locking and accounting is 1355 * done on the hardware 1356 */ 1357 if (lnlstatus == NFS_GRANTED_DUPLICATE) { 1358 retval = PFL_GRANTED_DUPLICATE; 1359 } else { 1360 retval = PFL_GRANTED; 1361 } 1362 monitor_lock_host(fl->client_name); 1363 break; 1364 case HW_RESERR: 1365 debuglog("HW RESERR\n"); 1366 retval = PFL_HWRESERR; 1367 break; 1368 case HW_DENIED: 1369 debuglog("HW DENIED\n"); 1370 retval = PFL_HWDENIED; 1371 break; 1372 default: 1373 debuglog("Unmatched hwstatus %d\n",hwstatus); 1374 break; 1375 } 1376 1377 if (retval != PFL_GRANTED && 1378 retval != PFL_GRANTED_DUPLICATE) { 1379 /* Clean up the NFS lock */ 1380 debuglog("Deleting trial NFS lock\n"); 1381 delete_nfslock(fl); 1382 } 1383 break; 1384 case NFS_DENIED: 1385 retval = PFL_NFSDENIED; 1386 break; 1387 case NFS_RESERR: 1388 retval = PFL_NFSRESERR; 1389 
default: 1390 debuglog("Unmatched lnlstatus %d\n"); 1391 retval = PFL_NFSDENIED_NOLOCK; 1392 break; 1393 } 1394 1395 /* 1396 * By the time fl reaches here, it is completely free again on 1397 * failure. The NFS lock done before attempting the 1398 * hardware lock has been backed out 1399 */ 1400 1401 if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) { 1402 /* Once last chance to check the lock */ 1403 if (fl->blocking == 1) { 1404 /* Queue the lock */ 1405 debuglog("BLOCKING LOCK RECEIVED\n"); 1406 retval = (retval == PFL_NFSDENIED ? 1407 PFL_NFSBLOCKED : PFL_HWBLOCKED); 1408 add_blockingfilelock(fl); 1409 dump_filelock(fl); 1410 } else { 1411 /* Leave retval alone, it's already correct */ 1412 debuglog("Lock denied. Non-blocking failure\n"); 1413 dump_filelock(fl); 1414 } 1415 } 1416 1417 debuglog("Exiting lock_partialfilelock\n"); 1418 1419 return retval; 1420} 1421 1422/* 1423 * unlock_partialfilelock: 1424 * 1425 * Given a file_lock, unlock all locks which match. 1426 * 1427 * Note that a given lock might have to unlock ITSELF! See 1428 * clear_partialfilelock for example. 1429 */ 1430 1431enum partialfilelock_status 1432unlock_partialfilelock(const struct file_lock *fl) 1433{ 1434 struct file_lock *lfl,*rfl,*releasedfl,*selffl; 1435 enum partialfilelock_status retval; 1436 enum nfslock_status unlstatus; 1437 enum hwlock_status unlhwstatus, lhwstatus; 1438 1439 debuglog("Entering unlock_partialfilelock\n"); 1440 1441 selffl = NULL; 1442 lfl = NULL; 1443 rfl = NULL; 1444 releasedfl = NULL; 1445 retval = PFL_DENIED; 1446 1447 /* 1448 * There are significant overlap and atomicity issues 1449 * with partially releasing a lock. For example, releasing 1450 * part of an NFS shared lock does *not* always release the 1451 * corresponding part of the file since there is only one 1452 * rpc.lockd UID but multiple users could be requesting it 1453 * from NFS. Also, an unlock request should never allow 1454 * another process to gain a lock on the remaining parts. 
1455 * ie. Always apply the new locks before releasing the 1456 * old one 1457 */ 1458 1459 /* 1460 * Loop is required since multiple little locks 1461 * can be allocated and then deallocated with one 1462 * big unlock. 1463 * 1464 * The loop is required to be here so that the nfs & 1465 * hw subsystems do not need to communicate with one 1466 * one another 1467 */ 1468 1469 do { 1470 debuglog("Value of releasedfl: %p\n",releasedfl); 1471 /* lfl&rfl are created *AND* placed into the NFS lock list if required */ 1472 unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl); 1473 debuglog("Value of releasedfl: %p\n",releasedfl); 1474 1475 1476 /* XXX: This is grungy. It should be refactored to be cleaner */ 1477 if (lfl != NULL) { 1478 lhwstatus = lock_hwlock(lfl); 1479 if (lhwstatus != HW_GRANTED && 1480 lhwstatus != HW_GRANTED_DUPLICATE) { 1481 debuglog("HW duplicate lock failure for left split\n"); 1482 } 1483 monitor_lock_host(lfl->client_name); 1484 } 1485 1486 if (rfl != NULL) { 1487 lhwstatus = lock_hwlock(rfl); 1488 if (lhwstatus != HW_GRANTED && 1489 lhwstatus != HW_GRANTED_DUPLICATE) { 1490 debuglog("HW duplicate lock failure for right split\n"); 1491 } 1492 monitor_lock_host(rfl->client_name); 1493 } 1494 1495 switch (unlstatus) { 1496 case NFS_GRANTED: 1497 /* Attempt to unlock on the hardware */ 1498 debuglog("NFS unlock granted. Attempting hardware unlock\n"); 1499 1500 /* This call *MUST NOT* unlock the two newly allocated locks */ 1501 unlhwstatus = unlock_hwlock(fl); 1502 debuglog("HW unlock returned with code %d\n",unlhwstatus); 1503 1504 switch (unlhwstatus) { 1505 case HW_GRANTED: 1506 debuglog("HW unlock granted\n"); 1507 unmonitor_lock_host(releasedfl->client_name); 1508 retval = PFL_GRANTED; 1509 break; 1510 case HW_DENIED_NOLOCK: 1511 /* Huh?!?! 
This shouldn't happen */ 1512 debuglog("HW unlock denied no lock\n"); 1513 retval = PFL_HWRESERR; 1514 /* Break out of do-while */ 1515 unlstatus = NFS_RESERR; 1516 break; 1517 default: 1518 debuglog("HW unlock failed\n"); 1519 retval = PFL_HWRESERR; 1520 /* Break out of do-while */ 1521 unlstatus = NFS_RESERR; 1522 break; 1523 } 1524 1525 debuglog("Exiting with status retval: %d\n",retval); 1526 1527 retry_blockingfilelocklist(); 1528 break; 1529 case NFS_DENIED_NOLOCK: 1530 retval = PFL_GRANTED; 1531 debuglog("All locks cleaned out\n"); 1532 break; 1533 default: 1534 retval = PFL_NFSRESERR; 1535 debuglog("NFS unlock failure\n"); 1536 dump_filelock(fl); 1537 break; 1538 } 1539 1540 if (releasedfl != NULL) { 1541 if (fl == releasedfl) { 1542 /* 1543 * XXX: YECHHH!!! Attempt to unlock self succeeded 1544 * but we can't deallocate the space yet. This is what 1545 * happens when you don't write malloc and free together 1546 */ 1547 debuglog("Attempt to unlock self\n"); 1548 selffl = releasedfl; 1549 } else { 1550 /* 1551 * XXX: this deallocation *still* needs to migrate closer 1552 * to the allocation code way up in get_lock or the allocation 1553 * code needs to migrate down (violation of "When you write 1554 * malloc you must write free") 1555 */ 1556 1557 deallocate_file_lock(releasedfl); 1558 } 1559 } 1560 1561 } while (unlstatus == NFS_GRANTED); 1562 1563 if (selffl != NULL) { 1564 /* 1565 * This statement wipes out the incoming file lock (fl) 1566 * in spite of the fact that it is declared const 1567 */ 1568 debuglog("WARNING! Destroying incoming lock pointer\n"); 1569 deallocate_file_lock(selffl); 1570 } 1571 1572 debuglog("Exiting unlock_partialfilelock\n"); 1573 1574 return retval; 1575} 1576 1577/* 1578 * clear_partialfilelock 1579 * 1580 * Normally called in response to statd state number change. 1581 * Wipe out all locks held by a host. As a bonus, the act of 1582 * doing so should automatically clear their statd entries and 1583 * unmonitor the host. 
1584 */ 1585 1586void 1587clear_partialfilelock(const char *hostname) 1588{ 1589 struct file_lock *ifl, *nfl; 1590 1591 /* Clear blocking file lock list */ 1592 clear_blockingfilelock(hostname); 1593 1594 /* do all required unlocks */ 1595 /* Note that unlock can smash the current pointer to a lock */ 1596 1597 /* 1598 * Normally, LIST_FOREACH is called for, but since 1599 * the current element *is* the iterator, deleting it 1600 * would mess up the iteration. Thus, a next element 1601 * must be used explicitly 1602 */ 1603 1604 ifl = LIST_FIRST(&nfslocklist_head); 1605 1606 while (ifl != NULL) { 1607 nfl = LIST_NEXT(ifl, nfslocklist); 1608 1609 if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) { 1610 /* Unlock destroys ifl out from underneath */ 1611 unlock_partialfilelock(ifl); 1612 /* ifl is NO LONGER VALID AT THIS POINT */ 1613 } 1614 ifl = nfl; 1615 } 1616} 1617 1618/* 1619 * test_partialfilelock: 1620 */ 1621enum partialfilelock_status 1622test_partialfilelock(const struct file_lock *fl, 1623 struct file_lock **conflicting_fl) 1624{ 1625 enum partialfilelock_status retval; 1626 enum nfslock_status teststatus; 1627 1628 debuglog("Entering testpartialfilelock...\n"); 1629 1630 retval = PFL_DENIED; 1631 1632 teststatus = test_nfslock(fl, conflicting_fl); 1633 debuglog("test_partialfilelock: teststatus %d\n",teststatus); 1634 1635 if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) { 1636 /* XXX: Add the underlying filesystem locking code */ 1637 retval = (teststatus == NFS_GRANTED) ? 
1638 PFL_GRANTED : PFL_GRANTED_DUPLICATE; 1639 debuglog("Dumping locks...\n"); 1640 dump_filelock(fl); 1641 dump_filelock(*conflicting_fl); 1642 debuglog("Done dumping locks...\n"); 1643 } else { 1644 retval = PFL_NFSDENIED; 1645 debuglog("NFS test denied.\n"); 1646 dump_filelock(fl); 1647 debuglog("Conflicting.\n"); 1648 dump_filelock(*conflicting_fl); 1649 } 1650 1651 debuglog("Exiting testpartialfilelock...\n"); 1652 1653 return retval; 1654} 1655 1656/* 1657 * Below here are routines associated with translating the partial file locking 1658 * codes into useful codes to send back to the NFS RPC messaging system 1659 */ 1660 1661/* 1662 * These routines translate the (relatively) useful return codes back onto 1663 * the few return codes which the nlm subsystems wishes to trasmit 1664 */ 1665 1666enum nlm_stats 1667do_test(struct file_lock *fl, struct file_lock **conflicting_fl) 1668{ 1669 enum partialfilelock_status pfsret; 1670 enum nlm_stats retval; 1671 1672 debuglog("Entering do_test...\n"); 1673 1674 pfsret = test_partialfilelock(fl,conflicting_fl); 1675 1676 switch (pfsret) { 1677 case PFL_GRANTED: 1678 debuglog("PFL test lock granted\n"); 1679 dump_filelock(fl); 1680 dump_filelock(*conflicting_fl); 1681 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1682 break; 1683 case PFL_GRANTED_DUPLICATE: 1684 debuglog("PFL test lock granted--duplicate id detected\n"); 1685 dump_filelock(fl); 1686 dump_filelock(*conflicting_fl); 1687 debuglog("Clearing conflicting_fl for call semantics\n"); 1688 *conflicting_fl = NULL; 1689 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1690 break; 1691 case PFL_NFSDENIED: 1692 case PFL_HWDENIED: 1693 debuglog("PFL test lock denied\n"); 1694 dump_filelock(fl); 1695 dump_filelock(*conflicting_fl); 1696 retval = (fl->flags & LOCK_V4) ? 
nlm4_denied : nlm_denied; 1697 break; 1698 case PFL_NFSRESERR: 1699 case PFL_HWRESERR: 1700 debuglog("PFL test lock resource fail\n"); 1701 dump_filelock(fl); 1702 dump_filelock(*conflicting_fl); 1703 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks; 1704 break; 1705 default: 1706 debuglog("PFL test lock *FAILED*\n"); 1707 dump_filelock(fl); 1708 dump_filelock(*conflicting_fl); 1709 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied; 1710 break; 1711 } 1712 1713 debuglog("Exiting do_test...\n"); 1714 1715 return retval; 1716} 1717 1718/* 1719 * do_lock: Try to acquire a lock 1720 * 1721 * This routine makes a distinction between NLM versions. I am pretty 1722 * convinced that this should be abstracted out and bounced up a level 1723 */ 1724 1725enum nlm_stats 1726do_lock(struct file_lock *fl) 1727{ 1728 enum partialfilelock_status pfsret; 1729 enum nlm_stats retval; 1730 1731 debuglog("Entering do_lock...\n"); 1732 1733 pfsret = lock_partialfilelock(fl); 1734 1735 switch (pfsret) { 1736 case PFL_GRANTED: 1737 debuglog("PFL lock granted"); 1738 dump_filelock(fl); 1739 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1740 break; 1741 case PFL_GRANTED_DUPLICATE: 1742 debuglog("PFL lock granted--duplicate id detected"); 1743 dump_filelock(fl); 1744 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1745 break; 1746 case PFL_NFSDENIED: 1747 case PFL_HWDENIED: 1748 debuglog("PFL_NFS lock denied"); 1749 dump_filelock(fl); 1750 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied; 1751 break; 1752 case PFL_NFSBLOCKED: 1753 case PFL_HWBLOCKED: 1754 debuglog("PFL_NFS blocking lock denied. Queued.\n"); 1755 dump_filelock(fl); 1756 retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked; 1757 break; 1758 case PFL_NFSRESERR: 1759 case PFL_HWRESERR: 1760 debuglog("PFL lock resource alocation fail\n"); 1761 dump_filelock(fl); 1762 retval = (fl->flags & LOCK_V4) ? 
nlm4_denied_nolocks : nlm_denied_nolocks; 1763 break; 1764 default: 1765 debuglog("PFL lock *FAILED*"); 1766 dump_filelock(fl); 1767 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied; 1768 break; 1769 } 1770 1771 debuglog("Exiting do_lock...\n"); 1772 1773 return retval; 1774} 1775 1776enum nlm_stats 1777do_unlock(struct file_lock *fl) 1778{ 1779 enum partialfilelock_status pfsret; 1780 enum nlm_stats retval; 1781 1782 debuglog("Entering do_unlock...\n"); 1783 pfsret = unlock_partialfilelock(fl); 1784 1785 switch (pfsret) { 1786 case PFL_GRANTED: 1787 debuglog("PFL unlock granted"); 1788 dump_filelock(fl); 1789 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1790 break; 1791 case PFL_NFSDENIED: 1792 case PFL_HWDENIED: 1793 debuglog("PFL_NFS unlock denied"); 1794 dump_filelock(fl); 1795 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied; 1796 break; 1797 case PFL_NFSDENIED_NOLOCK: 1798 case PFL_HWDENIED_NOLOCK: 1799 debuglog("PFL_NFS no lock found\n"); 1800 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1801 break; 1802 case PFL_NFSRESERR: 1803 case PFL_HWRESERR: 1804 debuglog("PFL unlock resource failure"); 1805 dump_filelock(fl); 1806 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks; 1807 break; 1808 default: 1809 debuglog("PFL unlock *FAILED*"); 1810 dump_filelock(fl); 1811 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied; 1812 break; 1813 } 1814 1815 debuglog("Exiting do_unlock...\n"); 1816 1817 return retval; 1818} 1819 1820/* 1821 * do_clear 1822 * 1823 * This routine is non-existent because it doesn't have a return code. 1824 * It is here for completeness in case someone *does* need to do return 1825 * codes later. A decent compiler should optimize this away. 
1826 */ 1827 1828void 1829do_clear(const char *hostname) 1830{ 1831 1832 clear_partialfilelock(hostname); 1833} 1834 1835/* 1836 * The following routines are all called from the code which the 1837 * RPC layer invokes 1838 */ 1839 1840/* 1841 * testlock(): inform the caller if the requested lock would be granted 1842 * 1843 * returns NULL if lock would granted 1844 * returns pointer to a conflicting nlm4_holder if not 1845 */ 1846 1847struct nlm4_holder * 1848testlock(struct nlm4_lock *lock, bool_t exclusive, int flags __unused) 1849{ 1850 struct file_lock test_fl, *conflicting_fl; 1851 1852 bzero(&test_fl, sizeof(test_fl)); 1853 1854 bcopy(lock->fh.n_bytes, &(test_fl.filehandle), sizeof(fhandle_t)); 1855 copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client); 1856 1857 siglock(); 1858 do_test(&test_fl, &conflicting_fl); 1859 1860 if (conflicting_fl == NULL) { 1861 debuglog("No conflicting lock found\n"); 1862 sigunlock(); 1863 return NULL; 1864 } else { 1865 debuglog("Found conflicting lock\n"); 1866 dump_filelock(conflicting_fl); 1867 sigunlock(); 1868 return (&conflicting_fl->client); 1869 } 1870} 1871 1872/* 1873 * getlock: try to aquire the lock. 1874 * If file is already locked and we can sleep, put the lock in the list with 1875 * status LKST_WAITING; it'll be processed later. 1876 * Otherwise try to lock. If we're allowed to block, fork a child which 1877 * will do the blocking lock. 1878 */ 1879 1880enum nlm_stats 1881getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags) 1882{ 1883 struct file_lock *newfl; 1884 enum nlm_stats retval; 1885 1886 debuglog("Entering getlock...\n"); 1887 1888 if (grace_expired == 0 && lckarg->reclaim == 0) 1889 return (flags & LOCK_V4) ? 
1890 nlm4_denied_grace_period : nlm_denied_grace_period; 1891 1892 /* allocate new file_lock for this request */ 1893 newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->cookie); 1894 if (newfl == NULL) { 1895 syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno)); 1896 /* failed */ 1897 return (flags & LOCK_V4) ? 1898 nlm4_denied_nolocks : nlm_denied_nolocks; 1899 } 1900 1901 if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) { 1902 debuglog("recieved fhandle size %d, local size %d", 1903 lckarg->alock.fh.n_len, (int)sizeof(fhandle_t)); 1904 } 1905 1906 fill_file_lock(newfl, (fhandle_t *)lckarg->alock.fh.n_bytes, 1907 (struct sockaddr *)svc_getrpccaller(rqstp->rq_xprt)->buf, 1908 lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset, 1909 lckarg->alock.l_len, 1910 lckarg->alock.caller_name, lckarg->state, 0, flags, lckarg->block); 1911 1912 /* 1913 * newfl is now fully constructed and deallocate_file_lock 1914 * can now be used to delete it 1915 */ 1916 1917 siglock(); 1918 debuglog("Pointer to new lock is %p\n",newfl); 1919 1920 retval = do_lock(newfl); 1921 1922 debuglog("Pointer to new lock is %p\n",newfl); 1923 sigunlock(); 1924 1925 switch (retval) 1926 { 1927 case nlm4_granted: 1928 /* case nlm_granted: is the same as nlm4_granted */ 1929 /* do_mon(lckarg->alock.caller_name); */ 1930 break; 1931 case nlm4_blocked: 1932 /* case nlm_blocked: is the same as nlm4_blocked */ 1933 /* do_mon(lckarg->alock.caller_name); */ 1934 break; 1935 default: 1936 deallocate_file_lock(newfl); 1937 break; 1938 } 1939 1940 debuglog("Exiting getlock...\n"); 1941 1942 return retval; 1943} 1944 1945 1946/* unlock a filehandle */ 1947enum nlm_stats 1948unlock(nlm4_lock *lock, const int flags __unused) 1949{ 1950 struct file_lock fl; 1951 enum nlm_stats err; 1952 1953 siglock(); 1954 1955 debuglog("Entering unlock...\n"); 1956 1957 bzero(&fl,sizeof(struct file_lock)); 1958 bcopy(lock->fh.n_bytes, &fl.filehandle, sizeof(fhandle_t)); 1959 1960 
copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client); 1961 1962 err = do_unlock(&fl); 1963 1964 sigunlock(); 1965 1966 debuglog("Exiting unlock...\n"); 1967 1968 return err; 1969} 1970 1971/* 1972 * XXX: The following monitor/unmonitor routines 1973 * have not been extensively tested (ie. no regression 1974 * script exists like for the locking sections 1975 */ 1976 1977/* 1978 * monitor_lock_host: monitor lock hosts locally with a ref count and 1979 * inform statd 1980 */ 1981void 1982monitor_lock_host(const char *hostname) 1983{ 1984 struct host *ihp, *nhp; 1985 struct mon smon; 1986 struct sm_stat_res sres; 1987 int rpcret, statflag; 1988 1989 rpcret = 0; 1990 statflag = 0; 1991 1992 LIST_FOREACH(ihp, &hostlst_head, hostlst) { 1993 if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) { 1994 /* Host is already monitored, bump refcount */ 1995 ++ihp->refcnt; 1996 /* Host should only be in the monitor list once */ 1997 return; 1998 } 1999 } 2000 2001 /* Host is not yet monitored, add it */ 2002 nhp = malloc(sizeof(struct host)); 2003 2004 if (nhp == NULL) { 2005 debuglog("Unable to allocate entry for statd mon\n"); 2006 return; 2007 } 2008 2009 /* Allocated new host entry, now fill the fields */ 2010 strncpy(nhp->name, hostname, SM_MAXSTRLEN); 2011 nhp->refcnt = 1; 2012 debuglog("Locally Monitoring host %16s\n",hostname); 2013 2014 debuglog("Attempting to tell statd\n"); 2015 2016 bzero(&smon,sizeof(smon)); 2017 2018 smon.mon_id.mon_name = nhp->name; 2019 smon.mon_id.my_id.my_name = "localhost\0"; 2020 2021 smon.mon_id.my_id.my_prog = NLM_PROG; 2022 smon.mon_id.my_id.my_vers = NLM_SM; 2023 smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY; 2024 2025 rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON, xdr_mon, 2026 &smon, xdr_sm_stat_res, &sres); 2027 2028 if (rpcret == 0) { 2029 if (sres.res_stat == stat_fail) { 2030 debuglog("Statd call failed\n"); 2031 statflag = 0; 2032 } else { 2033 statflag = 1; 2034 } 2035 } else { 2036 debuglog("Rpc call to statd failed with return 
value: %d\n", 2037 rpcret); 2038 statflag = 0; 2039 } 2040 2041 if (statflag == 1) { 2042 LIST_INSERT_HEAD(&hostlst_head, nhp, hostlst); 2043 } else { 2044 free(nhp); 2045 } 2046 2047} 2048 2049/* 2050 * unmonitor_lock_host: clear monitor ref counts and inform statd when gone 2051 */ 2052void 2053unmonitor_lock_host(const char *hostname) 2054{ 2055 struct host *ihp; 2056 struct mon_id smon_id; 2057 struct sm_stat smstat; 2058 int rpcret; 2059 2060 rpcret = 0; 2061 2062 for( ihp=LIST_FIRST(&hostlst_head); ihp != NULL; 2063 ihp=LIST_NEXT(ihp, hostlst)) { 2064 if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) { 2065 /* Host is monitored, bump refcount */ 2066 --ihp->refcnt; 2067 /* Host should only be in the monitor list once */ 2068 break; 2069 } 2070 } 2071 2072 if (ihp == NULL) { 2073 debuglog("Could not find host %16s in mon list\n", hostname); 2074 return; 2075 } 2076 2077 if (ihp->refcnt > 0) 2078 return; 2079 2080 if (ihp->refcnt < 0) { 2081 debuglog("Negative refcount!: %d\n", 2082 ihp->refcnt); 2083 } 2084 2085 debuglog("Attempting to unmonitor host %16s\n", hostname); 2086 2087 bzero(&smon_id,sizeof(smon_id)); 2088 2089 smon_id.mon_name = hostname; 2090 smon_id.my_id.my_name = "localhost"; 2091 smon_id.my_id.my_prog = NLM_PROG; 2092 smon_id.my_id.my_vers = NLM_SM; 2093 smon_id.my_id.my_proc = NLM_SM_NOTIFY; 2094 2095 rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON, xdr_mon, 2096 &smon_id, xdr_sm_stat_res, &smstat); 2097 2098 if (rpcret != 0) { 2099 debuglog("Rpc call to unmonitor statd failed with " 2100 " return value: %d\n", rpcret); 2101 } 2102 2103 LIST_REMOVE(ihp, hostlst); 2104 free(ihp); 2105} 2106 2107/* 2108 * notify: Clear all locks from a host if statd complains 2109 * 2110 * XXX: This routine has not been thoroughly tested. However, neither 2111 * had the old one been. It used to compare the statd crash state counter 2112 * to the current lock state. 
The upshot of this was that it basically 2113 * cleared all locks from the specified host 99% of the time (with the 2114 * other 1% being a bug). Consequently, the assumption is that clearing 2115 * all locks from a host when notified by statd is acceptable. 2116 * 2117 * Please note that this routine skips the usual level of redirection 2118 * through a do_* type routine. This introduces a possible level of 2119 * error and might better be written as do_notify and take this one out. 2120 2121 */ 2122 2123void 2124notify(const char *hostname, const int state) 2125{ 2126 debuglog("notify from %s, new state %d", hostname, state); 2127 2128 siglock(); 2129 do_clear(hostname); 2130 sigunlock(); 2131 2132 debuglog("Leaving notify\n"); 2133} 2134 2135void 2136send_granted(fl, opcode) 2137 struct file_lock *fl; 2138 int opcode __unused; 2139{ 2140 CLIENT *cli; 2141 static char dummy; 2142 struct timeval timeo; 2143 int success; 2144 static struct nlm_res retval; 2145 static struct nlm4_res retval4; 2146 2147 debuglog("About to send granted on blocked lock\n"); 2148 sleep(1); 2149 debuglog("Blowing off return send\n"); 2150 2151 cli = get_client(fl->addr, 2152 (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS); 2153 if (cli == NULL) { 2154 syslog(LOG_NOTICE, "failed to get CLIENT for %s", 2155 fl->client_name); 2156 /* 2157 * We fail to notify remote that the lock has been granted. 2158 * The client will timeout and retry, the lock will be 2159 * granted at this time. 2160 */ 2161 return; 2162 } 2163 timeo.tv_sec = 0; 2164 timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 
0 : 500000; /* 0.5s */ 2165 2166 if (fl->flags & LOCK_V4) { 2167 static nlm4_testargs res; 2168 res.cookie = fl->client_cookie; 2169 res.exclusive = fl->client.exclusive; 2170 res.alock.caller_name = fl->client_name; 2171 res.alock.fh.n_len = sizeof(fhandle_t); 2172 res.alock.fh.n_bytes = (char*)&fl->filehandle; 2173 res.alock.oh = fl->client.oh; 2174 res.alock.svid = fl->client.svid; 2175 res.alock.l_offset = fl->client.l_offset; 2176 res.alock.l_len = fl->client.l_len; 2177 debuglog("sending v4 reply%s", 2178 (fl->flags & LOCK_ASYNC) ? " (async)":""); 2179 if (fl->flags & LOCK_ASYNC) { 2180 success = clnt_call(cli, NLM4_GRANTED_MSG, 2181 xdr_nlm4_testargs, &res, xdr_void, &dummy, timeo); 2182 } else { 2183 success = clnt_call(cli, NLM4_GRANTED, 2184 xdr_nlm4_testargs, &res, xdr_nlm4_res, 2185 &retval4, timeo); 2186 } 2187 } else { 2188 static nlm_testargs res; 2189 2190 res.cookie = fl->client_cookie; 2191 res.exclusive = fl->client.exclusive; 2192 res.alock.caller_name = fl->client_name; 2193 res.alock.fh.n_len = sizeof(fhandle_t); 2194 res.alock.fh.n_bytes = (char*)&fl->filehandle; 2195 res.alock.oh = fl->client.oh; 2196 res.alock.svid = fl->client.svid; 2197 res.alock.l_offset = fl->client.l_offset; 2198 res.alock.l_len = fl->client.l_len; 2199 debuglog("sending v1 reply%s", 2200 (fl->flags & LOCK_ASYNC) ? " (async)":""); 2201 if (fl->flags & LOCK_ASYNC) { 2202 success = clnt_call(cli, NLM_GRANTED_MSG, 2203 xdr_nlm_testargs, &res, xdr_void, &dummy, timeo); 2204 } else { 2205 success = clnt_call(cli, NLM_GRANTED, 2206 xdr_nlm_testargs, &res, xdr_nlm_res, 2207 &retval, timeo); 2208 } 2209 } 2210 if (debug_level > 2) 2211 debuglog("clnt_call returns %d(%s) for granted", 2212 success, clnt_sperrno(success)); 2213 2214} 2215 2216/* 2217 * Routines below here have not been modified in the overhaul 2218 */ 2219 2220/* 2221 * Are these two routines still required since lockd is not spawning off 2222 * children to service locks anymore? 
Presumably they were originally 2223 * put in place to prevent a one child from changing the lock list out 2224 * from under another one. 2225 */ 2226 2227void 2228siglock(void) 2229{ 2230 sigset_t block; 2231 2232 sigemptyset(&block); 2233 sigaddset(&block, SIGCHLD); 2234 2235 if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) { 2236 syslog(LOG_WARNING, "siglock failed: %s", strerror(errno)); 2237 } 2238} 2239 2240void 2241sigunlock(void) 2242{ 2243 sigset_t block; 2244 2245 sigemptyset(&block); 2246 sigaddset(&block, SIGCHLD); 2247 2248 if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) { 2249 syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno)); 2250 } 2251} 2252 2253 2254