• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /macosx-10.5.8/xnu-1228.15.4/bsd/kern/

Lines Matching refs:lock

66 #include <sys/lock.h>
89 void lf_print(const char *tag, struct lockf *lock);
90 void lf_printlist(const char *tag, struct lockf *lock);
116 * Overlapping lock states
152 * lock operation to be attempted.
175 struct lockf *lock;
189 LOCKF_DEBUG(0, "lf_advlock: '%s' unlock without lock\n", vfs_context_proc(context)->p_comm);
260 MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
261 if (lock == NULL)
263 lock->lf_start = start;
264 lock->lf_end = end;
265 lock->lf_id = ap->a_id;
266 lock->lf_vnode = vp;
267 lock->lf_type = fl->l_type;
268 lock->lf_head = head;
269 lock->lf_next = (struct lockf *)0;
270 lock->lf_waiters = 0;
271 TAILQ_INIT(&lock->lf_blkhd);
272 lock->lf_flags = ap->a_flags;
275 lock->lf_flags |= F_WAKE1_SAFE;
283 error = lf_setlock(lock);
287 error = lf_clearlock(lock);
288 FREE(lock, M_LOCKF);
292 error = lf_getlock(lock, fl);
293 FREE(lock, M_LOCKF);
297 FREE(lock, M_LOCKF);
311 * Description: Helper function: when setting a lock, coelesce adjacent
315 * Parameters: lock The new lock which may be adjacent
322 lf_coelesce_adjacent(struct lockf *lock)
324 struct lockf **lf = lock->lf_head;
328 if ((*lf == lock) ||
329 ((*lf)->lf_id != lock->lf_id) ||
330 ((*lf)->lf_type != lock->lf_type)) {
335 /* If the lock ends adjacent to us, we can coelesce it */
337 ((*lf)->lf_end + 1) == lock->lf_start) {
341 lock->lf_start = (*lf)->lf_start;
342 *lf = lock;
347 /* If the lock starts adjacent to us, we can coelesce it */
348 if (lock->lf_end != -1 &&
349 (lock->lf_end + 1) == (*lf)->lf_start) {
353 lock->lf_end = (*lf)->lf_end;
354 lock->lf_next = (*lf)->lf_next;
355 lf = &lock->lf_next;
360 /* no matching conditions; go on to next lock */
369 * Description: Set a byte-range lock.
371 * Parameters: lock The lock structure describing the lock
373 * will be linked into the lock list if
384 * Notes: We add the lock to the provisional lock list. We do not
385 * coelesce at this time; this has implications for other lock
389 lf_setlock(struct lockf *lock)
392 struct lockf **head = lock->lf_head;
396 struct vnode *vp = lock->lf_vnode;
401 lf_print("lf_setlock", lock);
402 lf_printlist("lf_setlock(in)", lock);
410 if (lock->lf_type == F_WRLCK)
414 * Scan lock list for this file looking for locks that would block us.
416 while ((block = lf_getblock(lock))) {
420 if ((lock->lf_flags & F_WAIT) == 0) {
421 FREE(lock, M_LOCKF);
435 if ((lock->lf_flags & F_POSIX) &&
451 * cycle to see if the lock is blocked behind
459 * Get the lock blocking the lock
473 * lock and not an overall file lock;
474 * if we mix lock types, it's our own
481 * If the owner of the lock that's
482 * blocking a lock that's blocking us
483 * getting the requested lock, then we
487 if (bproc == (struct proc *)lock->lf_id) {
489 FREE(lock, M_LOCKF);
500 * waiting for an exclusive lock.
502 if ((lock->lf_flags & F_FLOCK) &&
503 lock->lf_type == F_WRLCK) {
504 lock->lf_type = F_UNLCK;
505 if ((error = lf_clearlock(lock)) != 0) {
506 FREE(lock, M_LOCKF);
509 lock->lf_type = F_WRLCK;
512 * Add our lock to the blocked list and sleep until we're free.
515 lock->lf_next = block;
516 TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
519 if ( !(lock->lf_flags & F_FLOCK))
528 error = msleep(lock, &vp->v_lock, priority, lockstr, 0);
530 if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
533 if ((block = lf_getblock(lock))) {
534 TAILQ_FOREACH(tlock, &lock->lf_blkhd, lf_block) {
537 TAILQ_CONCAT(&block->lf_blkhd, &lock->lf_blkhd, lf_block);
539 block->lf_waiters += lock->lf_waiters;
540 lock->lf_waiters = 0;
548 * process releasing a lock (in which case we have
552 if (lock->lf_next) {
553 TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
554 lock->lf_next->lf_waiters--;
555 lock->lf_next = NOLOCKF;
557 if (!TAILQ_EMPTY(&lock->lf_blkhd))
558 lf_wakelock(lock, TRUE);
560 FREE(lock, M_LOCKF);
565 * No blocks!! Add the lock. Note that we will
576 ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
582 * 1) overlap == lock
583 * 2) overlap contains lock
584 * 3) lock contains overlap
585 * 4) overlap starts before lock
586 * 5) overlap ends after lock
591 *prev = lock;
592 lock->lf_next = overlap;
598 * If downgrading lock, others may be
601 if (lock->lf_type == F_RDLCK &&
604 overlap->lf_type = lock->lf_type;
605 FREE(lock, M_LOCKF);
606 lock = overlap; /* for lf_coelesce_adjacent() */
613 if (overlap->lf_type == lock->lf_type) {
614 FREE(lock, M_LOCKF);
615 lock = overlap; /* for lf_coelesce_adjacent() */
618 if (overlap->lf_start == lock->lf_start) {
619 *prev = lock;
620 lock->lf_next = overlap;
621 overlap->lf_start = lock->lf_end + 1;
624 * If we can't split the lock, we can't
628 if (lf_split(overlap, lock)) {
629 FREE(lock, M_LOCKF);
638 * If downgrading lock, others may be able to
641 if (lock->lf_type == F_RDLCK &&
651 TAILQ_INSERT_TAIL(&lock->lf_blkhd,
653 lock->lf_waiters++;
655 ltmp->lf_next = lock;
659 * Add the new lock if necessary and delete the overlap.
662 *prev = lock;
663 lock->lf_next = overlap->lf_next;
664 prev = &lock->lf_next;
673 * Add lock after overlap on the list.
675 lock->lf_next = overlap->lf_next;
676 overlap->lf_next = lock;
677 overlap->lf_end = lock->lf_start - 1;
678 prev = &lock->lf_next;
685 * Add the new lock before overlap.
688 *prev = lock;
689 lock->lf_next = overlap;
691 overlap->lf_start = lock->lf_end + 1;
698 lf_coelesce_adjacent(lock);
701 lf_print("lf_setlock: got the lock", lock);
702 lf_printlist("lf_setlock(out)", lock);
712 * Description: Remove a byte-range lock on a vnode. Generally, find the
713 * lock (or an overlap to that lock) and remove it (or shrink
716 * Parameters: unlock The lock to clear
763 * If we can't split the lock, we can't grant it.
800 * Description: Check whether there is a blocking lock, and if so return
801 * its process identifier into the lock being requested.
803 * Parameters: lock Pointer to lock to test for blocks
805 * the blocking lock information, if a
806 * blocking lock is found.
812 * blocking lock, if one is found; not
819 lf_getlock(struct lockf *lock, struct flock *fl)
825 lf_print("lf_getlock", lock);
828 if ((block = lf_getblock(lock))) {
851 * blocking lock. A lock is considered blocking if we are not
852 * the lock owner; otherwise, we are permitted to upgrade or
855 * Parameters: lock The lock for which we are interested
856 * in obtaining the blocking lock, if any
858 * Returns: NOLOCKF No blocking lock exists
859 * !NOLOCKF The address of the blocking lock's
863 lf_getblock(struct lockf *lock)
865 struct lockf **prev, *overlap, *lf = *(lock->lf_head);
868 prev = lock->lf_head;
869 while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap)) != OVERLAP_NONE) {
873 if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
888 * Description: Walk the list of locks to find an overlapping lock (if any).
890 * Parameters: lf First lock on lock list
891 * lock The lock we are checking for an overlap
894 * address of pointer to previous lock
895 * pointer to overlapping lock, if overlap
897 * of overlapping lock
908 * lock previous to the overlapping lock;
910 * lock list, avoiding a second iteration.
911 * *overlap The pointer to the overlapping lock
914 * caller to modify the overlapping lock,
917 * Note: This returns only the FIRST overlapping lock. There may be
918 * more than one. lf_getlock will return the first blocking lock,
924 * we can report a blocking lock on an F_GETLK request.
927 * no overlapping lock found; always check the return code.
930 lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
940 lf_print("lf_findoverlap: looking for overlap in", lock);
942 start = lock->lf_start;
943 end = lock->lf_end;
945 if (((type & SELF) && lf->lf_id != lock->lf_id) ||
946 ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
969 LOCKF_DEBUG(2, "overlap == lock\n");
975 LOCKF_DEBUG(2, "overlap contains lock\n");
981 LOCKF_DEBUG(2, "lock contains overlap\n");
986 LOCKF_DEBUG(2, "overlap starts before lock\n");
992 LOCKF_DEBUG(2, "overlap ends after lock\n");
1004 * Description: Split a lock and a contained region into two or three locks
1008 * lock2 Overlapping lock region requiring the
1012 * ENOLCK No memory for new lock
1015 * *lock1 Modified original lock
1016 * *lock2 Overlapping lock (inserted into list)
1017 * (new lock) Potential new lock inserted into list
1022 * lock; in that case, neither of the locks will be modified.
1050 * Make a new lock consisting of the last part of
1051 * the encompassing lock
1075 * waiting on the lock may now be able to acquire it.
1134 * Print out a lock; lock information is prefixed by the string in 'tag'
1137 * lock The lock whose information should be
1143 lf_print(const char *tag, struct lockf *lock)
1145 printf("%s: lock %p for ", tag, (void *)lock);
1146 if (lock->lf_flags & F_POSIX)
1147 printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
1149 printf("id %p", (void *)lock->lf_id);
1150 if (lock->lf_vnode != 0)
1152 lock->lf_vnode,
1153 lock->lf_type == F_RDLCK ? "shared" :
1154 lock->lf_type == F_WRLCK ? "exclusive" :
1155 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
1156 (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
1159 lock->lf_type == F_RDLCK ? "shared" :
1160 lock->lf_type == F_WRLCK ? "exclusive" :
1161 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
1162 (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
1163 if (!TAILQ_EMPTY(&lock->lf_blkhd))
1164 printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
1173 * Print out a lock list for the vnode associated with 'lock'; lock information
1177 * lock The lock whose vnode's lock list should
1183 lf_printlist(const char *tag, struct lockf *lock)
1187 if (lock->lf_vnode == 0)
1191 tag, lock->lf_vnode);
1192 for (lf = lock->lf_vnode->v_lockf; lf; lf = lf->lf_next) {