• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/ap/gpl/timemachine/netatalk-2.2.0/libatalk/adouble/
1/*
2 * Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
3 * All Rights Reserved. See COPYRIGHT for more information.
4 *
5 * Byte-range locks. This uses either whole-file flocks to fake byte
6 * locks or fcntl-based actual byte locks. Because fcntl locks are
7 * process-oriented, we need to keep around a list of file descriptors
8 * that refer to the same file. Currently, this doesn't serialize access
9 * to the locks. as a result, there's the potential for race conditions.
10 *
11 * TODO: fix the race when reading/writing.
12 *       keep a pool of both locks and reference counters around so that
13 *       we can save on mallocs. we should also use a tree to keep things
14 *       sorted.
15 */
16
17#ifdef HAVE_CONFIG_H
18#include "config.h"
19#endif /* HAVE_CONFIG_H */
20
21#include <atalk/adouble.h>
22
23#include <stdio.h>
24#include <stdlib.h>
25#include <errno.h>
26
27#include <string.h>
28
29#include "ad_private.h"
30
31/* translate between ADLOCK styles and specific locking mechanisms */
32#define XLATE_FLOCK(type) ((type) == ADLOCK_RD ? LOCK_SH : \
33((type) == ADLOCK_WR ? LOCK_EX : \
34 ((type) == ADLOCK_CLR ? LOCK_UN : -1)))
35
36#ifdef DISABLE_LOCKING
37#define fcntl(a, b, c ) (0)
38#endif
39
40/* ----------------------- */
/* Wrapper around fcntl() locking that tolerates the fd == -2 sentinel
 * assigned to symlinks: such descriptors are never really locked, so
 * queries report the region free and every other command succeeds. */
static int set_lock(int fd, int cmd,  struct flock *lock)
{
    if (fd != -2)
        return fcntl(fd, cmd, lock);

    /* symlink placeholder: pretend "unlocked" and report success */
    if (cmd == F_GETLK)
        lock->l_type = F_UNLCK;
    return 0;
}
51
52/* ----------------------- */
53static int XLATE_FCNTL_LOCK(int type)
54{
55    switch(type) {
56    case ADLOCK_RD:
57        return F_RDLCK;
58    case ADLOCK_WR:
59         return F_WRLCK;
60    case ADLOCK_CLR:
61         return F_UNLCK;
62    }
63    return -1;
64}
65
66/* ----------------------- */
/* Do the byte ranges starting at a (length alen) and b (length blen)
 * intersect?  A length of zero means the range is open-ended: it
 * covers everything from its start onward. */
static int OVERLAP(off_t a, off_t alen, off_t b, off_t blen)
{
    /* an open-ended range that starts at or before the other always hits */
    if (alen == 0 && a <= b)
        return 1;
    if (blen == 0 && b <= a)
        return 1;
    /* standard half-open interval intersection test; a zero length
     * that falls through here still behaves as "extends forever" */
    return (a + alen > b) && (b + blen > a);
}
73
74/* allocation for lock regions. we allocate aggressively and shrink
75 * only in large chunks. */
76#define ARRAY_BLOCK_SIZE 10
77#define ARRAY_FREE_DELTA 100
78
/* remove a lock and compact space if necessary */
/*
 * Drop the lock stored at index i of ad's lock array.
 *
 * The refcount is heap-allocated and shared between forks that hold
 * the same read lock; the kernel lock is released (and the counter
 * freed) only when the last reference goes away.  The array is kept
 * dense by moving the final element into the vacated slot, and is
 * shrunk only in large steps (ARRAY_FREE_DELTA) to limit realloc
 * traffic.
 */
static void adf_freelock(struct ad_fd *ad, const int i)
{
    adf_lock_t *lock = ad->adf_lock + i;

    if (--(*lock->refcount) < 1) {
	free(lock->refcount);
	/* under an exclusive whole-file lock no per-range kernel lock
	 * was taken, so there is nothing to clear */
	if (!ad->adf_excl) {
	    lock->lock.l_type = F_UNLCK;
	    set_lock(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
	}
    }

    ad->adf_lockcount--;

    /* move another lock into the empty space */
    /* lock + (adf_lockcount - i) addresses what was the last element */
    if (i < ad->adf_lockcount) {
        memcpy(lock, lock + ad->adf_lockcount - i, sizeof(adf_lock_t));
    }

    /* free extra cruft if we go past a boundary. we always want to
     * keep at least some stuff around for allocations. this wastes
     * a bit of space to save time on reallocations. */
    if ((ad->adf_lockmax > ARRAY_FREE_DELTA) &&
	(ad->adf_lockcount + ARRAY_FREE_DELTA < ad->adf_lockmax)) {
	    struct adf_lock_t *tmp;

	    /* shrinking only: if realloc fails we simply keep the
	     * larger buffer, nothing is lost */
	    tmp = (struct adf_lock_t *)
		    realloc(ad->adf_lock, sizeof(adf_lock_t)*
			    (ad->adf_lockcount + ARRAY_FREE_DELTA));
	    if (tmp) {
		ad->adf_lock = tmp;
		ad->adf_lockmax = ad->adf_lockcount + ARRAY_FREE_DELTA;
	    }
    }
}
115
116
117/* this needs to deal with the following cases:
118 * 1) fork is the only user of the lock
119 * 2) fork shares a read lock with another open fork
120 *
121 * i converted to using arrays of locks. everytime a lock
122 * gets removed, we shift all of the locks down.
123 */
124static void adf_unlock(struct ad_fd *ad, const int fork)
125{
126    adf_lock_t *lock = ad->adf_lock;
127    int i;
128
129    for (i = 0; i < ad->adf_lockcount; i++) {
130
131      if (lock[i].user == fork) {
132	/* we're really going to delete this lock. note: read locks
133           are the only ones that allow refcounts > 1 */
134	 adf_freelock(ad, i);
135	 i--; /* we shifted things down, so we need to backtrack */
136	 /* unlikely but realloc may have change adf_lock */
137	 lock = ad->adf_lock;
138      }
139    }
140}
141
142/* relock any byte lock that overlaps off/len. unlock everything
143 * else. */
144static void adf_relockrange(struct ad_fd *ad, int fd,
145				       const off_t off, const off_t len)
146{
147    adf_lock_t *lock = ad->adf_lock;
148    int i;
149
150    if (!ad->adf_excl) for (i = 0; i < ad->adf_lockcount; i++) {
151      if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
152	set_lock(fd, F_SETLK, &lock[i].lock);
153    }
154}
155
156
157/* find a byte lock that overlaps off/len for a particular open fork */
158static int adf_findlock(struct ad_fd *ad,
159				   const int fork, const int type,
160				   const off_t off,
161				   const off_t len)
162{
163  adf_lock_t *lock = ad->adf_lock;
164  int i;
165
166  for (i = 0; i < ad->adf_lockcount; i++) {
167    if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
168	((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
169	(lock[i].user == fork) &&
170	OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
171      return i;
172    }
173  }
174
175  return -1;
176}
177
178
179/* search other fork lock lists */
180static int adf_findxlock(struct ad_fd *ad,
181				     const int fork, const int type,
182				     const off_t off,
183				     const off_t len)
184{
185  adf_lock_t *lock = ad->adf_lock;
186  int i;
187
188  for (i = 0; i < ad->adf_lockcount; i++) {
189    if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
190	 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
191	(lock[i].user != fork) &&
192	OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
193	    return i;
194  }
195  return -1;
196}
197
198/* okay, this needs to do the following:
199 * 1) check current list of locks. error on conflict.
200 * 2) apply the lock. error on conflict with another process.
201 * 3) update the list of locks this file has.
202 *
203 * NOTE: this treats synchronization locks a little differently. we
204 *       do the following things for those:
205 *       1) if the header file exists, all the locks go in the beginning
206 *          of that.
207 *       2) if the header file doesn't exist, we stick the locks
208 *          in the locations specified by AD_FILELOCK_RD/WR.
209 */
210#define LOCK_DATA_WR (0)
211#define LOCK_DATA_RD (1)
212#define LOCK_RSRC_WR (2)
213#define LOCK_RSRC_RD (3)
214
215#define LOCK_RSRC_DRD (4)
216#define LOCK_RSRC_DWR (5)
217#define LOCK_DATA_DRD (6)
218#define LOCK_DATA_DWR (7)
219
220#define LOCK_RSRC_NONE (8)
221#define LOCK_DATA_NONE (9)
222
223/* --------------
224	translate a data fork lock to an offset
225*/
226
227static off_t df2off(int off)
228{
229int start = off;
230	if (off == AD_FILELOCK_OPEN_WR)
231		start = LOCK_DATA_WR;
232	else if (off == AD_FILELOCK_OPEN_RD)
233		start = LOCK_DATA_RD;
234    else if (off == AD_FILELOCK_DENY_RD)
235		start = LOCK_DATA_DRD;
236	else if (off == AD_FILELOCK_DENY_WR)
237		start = LOCK_DATA_DWR;
238	else if (off == AD_FILELOCK_OPEN_NONE)
239		start = LOCK_DATA_NONE;
240	return start;
241}
242
243/* --------------
244	translate a resource fork lock to an offset
245*/
246
247static off_t hf2off(int off)
248{
249int start = off;
250	if (off == AD_FILELOCK_OPEN_WR)
251		start = LOCK_RSRC_WR;
252	else if (off == AD_FILELOCK_OPEN_RD)
253		start = LOCK_RSRC_RD;
254    else if (off == AD_FILELOCK_DENY_RD)
255		start = LOCK_RSRC_DRD;
256	else if (off == AD_FILELOCK_DENY_WR)
257		start = LOCK_RSRC_DWR;
258	else if (off == AD_FILELOCK_OPEN_NONE)
259		start = LOCK_RSRC_NONE;
260	return start;
261}
262
263/* ------------------ */
/*
 * Acquire, upgrade or clear a byte-range or file ("deny mode") lock
 * on behalf of an open fork:
 *   1) reject if a conflicting lock is held by another open fork
 *   2) apply the kernel lock (fails if another process holds one)
 *   3) record the lock in adf's lock array, sharing a refcount with
 *      an identical read lock held by another fork
 * Returns 0 on success, -1 with errno set (EACCES, EINVAL, or the
 * fcntl error) on failure.
 */
int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int locktype,
		  const off_t off, const off_t len, const int fork)
{
  struct flock lock;
  struct ad_fd *adf;
  adf_lock_t *adflock;
  int oldlock;
  int i;
  int type;

  lock.l_start = off;
  type = locktype;
  if (eid == ADEID_DFORK) {
    adf = &ad->ad_data_fork;
    if ((type & ADLOCK_FILELOCK)) {
        if (ad_meta_fileno(ad) != -1) { /* META */
            /* deny-mode locks live at the head of the meta data file
             * when one exists (see layout comment above) */
            adf = ad->ad_md;
            lock.l_start = df2off(off);
        }
    }
  } else { /* rfork */
    if (ad_meta_fileno(ad) == -1 || ad_reso_fileno(ad) == -1) {
        /* there's no meta data. return a lock error
         * otherwise if a second process is able to create it
         * locks are a mess.
         */
        errno = EACCES;
        return -1;
    }
    if (type & ADLOCK_FILELOCK) {
      adf = ad->ad_md;			/* either resource or meta data (set in ad_open) */
      lock.l_start = hf2off(off);
    }
    else {
      /* we really want the resource fork it's a byte lock */
      adf = &ad->ad_resource_fork;
      lock.l_start += ad_getentryoff(ad, eid);
    }
  }
  /* NOTE: we can't write lock a read-only file. on those, we just
    * make sure that we have a read lock set. that way, we at least prevent
    * someone else from really setting a deny read/write on the file.
    */
  if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
      type = (type & ~ADLOCK_WR) | ADLOCK_RD;
  }

  lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
  lock.l_whence = SEEK_SET;
  lock.l_len = len;

  /* byte_lock(len=-1) lock whole file */
  if (len == BYTELOCK_MAX) {
      lock.l_len -= lock.l_start; /* otherwise  EOVERFLOW error */
  }

  /* see if it's locked by another fork.
   * NOTE: this guarantees that any existing locks must be at most
   * read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
   * guaranteed to be ORable. */
  if (adf_findxlock(adf, fork, ADLOCK_WR |
		    ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
		    lock.l_start, lock.l_len) > -1) {
    errno = EACCES;
    return -1;
  }

  /* look for any existing lock that we may have */
  i = adf_findlock(adf, fork, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
  adflock = (i < 0) ? NULL : adf->adf_lock + i;

  /* here's what we check for:
     1) we're trying to re-lock a lock, but we didn't specify an update.
     2) we're trying to free only part of a lock.
     3) we're trying to free a non-existent lock. */
  if ( (!adflock && (lock.l_type == F_UNLCK))
       ||
       (adflock
        && !(type & ADLOCK_UPGRADE)
        && ((lock.l_type != F_UNLCK)
            || (adflock->lock.l_start != lock.l_start)
            || (adflock->lock.l_len != lock.l_len) ))
      ) {
      errno = EINVAL;
      return -1;
  }


  /* now, update our list of locks */
  /* clear the lock */
  if (lock.l_type == F_UNLCK) {
    adf_freelock(adf, i);
    return 0;
  }

  /* attempt to lock the file. */
  /* an exclusive whole-file lock already covers the range, so the
   * kernel call is skipped in that case */
  if (!adf->adf_excl && set_lock(adf->adf_fd, F_SETLK, &lock) < 0)
    return -1;

  /* we upgraded this lock. */
  if (adflock && (type & ADLOCK_UPGRADE)) {
    memcpy(&adflock->lock, &lock, sizeof(lock));
    return 0;
  }

  /* it wasn't an upgrade */
  /* read locks may be shared: reuse the refcount of an identical
   * read lock held by another fork, if any */
  oldlock = -1;
  if (lock.l_type == F_RDLCK) {
    oldlock = adf_findxlock(adf, fork, ADLOCK_RD, lock.l_start, lock.l_len);
  }

  /* no more space. this will also happen if lockmax == lockcount == 0 */
  if (adf->adf_lockmax == adf->adf_lockcount) {
    adf_lock_t *tmp = (adf_lock_t *)
	    realloc(adf->adf_lock, sizeof(adf_lock_t)*
		    (adf->adf_lockmax + ARRAY_BLOCK_SIZE));
    if (!tmp)
      goto fcntl_lock_err;
    adf->adf_lock = tmp;
    adf->adf_lockmax += ARRAY_BLOCK_SIZE;
  }
  adflock = adf->adf_lock + adf->adf_lockcount;

  /* fill in fields */
  memcpy(&adflock->lock, &lock, sizeof(lock));
  adflock->user = fork;
  if (oldlock > -1) {
    adflock->refcount = (adf->adf_lock + oldlock)->refcount;
  } else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
    goto fcntl_lock_err;
  }

  (*adflock->refcount)++;
  adf->adf_lockcount++;
  return 0;

fcntl_lock_err:
  /* undo the kernel lock taken above before reporting failure */
  lock.l_type = F_UNLCK;
  if (!adf->adf_excl) set_lock(adf->adf_fd, F_SETLK, &lock);
  return -1;
}
405
406/* -------------------------
407   we are using lock as tristate variable
408
409   we have a lock ==> 1
410   no             ==> 0
411   error          ==> -1
412
413*/
/* Probe whether the range off/len is locked, either by this process
 * (via the recorded lock list) or by another process (via F_GETLK).
 * Tristate return per the comment above: 1 locked, 0 free, -1 error. */
static int testlock(struct ad_fd *adf, off_t off, off_t len)
{
  struct flock lock;
  adf_lock_t *plock;
  int i;

  lock.l_start = off;

  plock = adf->adf_lock;
  lock.l_whence = SEEK_SET;
  lock.l_len = len;

  /* Do we have a lock? */
  /* NOTE(review): this scan tests only a 1-byte window at 'off'
   * instead of the full 'len' range — presumably sufficient for the
   * merged LOCK_* slot probes in ad_openforks; confirm against callers */
  for (i = 0; i < adf->adf_lockcount; i++) {
    if (OVERLAP(lock.l_start, 1, plock[i].lock.l_start, plock[i].lock.l_len))
        return 1;   /* */
  }
  /* Does another process have a lock?
  */
  /* probe with the strongest lock type this descriptor could take */
  lock.l_type = (adf->adf_flags & O_RDWR) ?F_WRLCK : F_RDLCK;

  if (set_lock(adf->adf_fd, F_GETLK, &lock) < 0) {
      /* is that kind of error possible ?*/
      /* treat a denied query as "locked", anything else as an error */
      return (errno == EACCES || errno == EAGAIN)?1:-1;
  }

  if (lock.l_type == F_UNLCK) {
      return 0;
  }
  return 1;
}
445
446/* --------------- */
447int ad_testlock(struct adouble *ad, int eid, const off_t off)
448{
449  struct ad_fd *adf;
450  off_t      lock_offset;
451
452  lock_offset = off;
453  if (eid == ADEID_DFORK) {
454    adf = &ad->ad_data_fork;
455    if (ad_meta_fileno(ad) != -1) {
456      	adf = ad->ad_md;
457    	lock_offset = df2off(off);
458    }
459  }
460  else { /* rfork */
461    if (ad_meta_fileno(ad) == -1) {
462        /* there's no resource fork. return no lock */
463        return 0;
464    }
465    adf = ad->ad_md;
466    lock_offset = hf2off(off);
467  }
468  return testlock(adf, lock_offset, 1);
469}
470
471/* -------------------------
472   return if a file is open by another process.
473   Optimized for the common case:
474   - there's no locks held by another process (clients)
475   - or we already know the answer and don't need to test.
476*/
/*
 * Report which forks of this file are open by another process, as
 * ATTRBIT_DOPEN / ATTRBIT_ROPEN bits in the return value.  Bits
 * already set in attrbits are treated as known-open and skipped.
 */
u_int16_t ad_openforks(struct adouble *ad, u_int16_t attrbits)
{
  u_int16_t ret = 0;
  struct ad_fd *adf;
  off_t off;

  if (!(attrbits & (ATTRBIT_DOPEN | ATTRBIT_ROPEN))) {
      off_t len;
      /* XXX know the locks layout:
         AD_FILELOCK_OPEN_WR is first
         and use it for merging requests
      */
      if (ad_meta_fileno(ad) != -1) {
          /* there's a resource fork test the four bytes for
           * data RW/RD and fork RW/RD locks in one request
          */
      	  adf = ad->ad_md;
      	  off = LOCK_DATA_WR;
      	  len = 4;
      }
      else {
          /* no resource fork, only data RD/RW may exist */
          adf = &ad->ad_data_fork;
          off = AD_FILELOCK_OPEN_WR;
          len = 2;
      }
      /* fast path: nothing locked in the merged range means no fork
       * is open elsewhere */
      if (!testlock(adf, off, len))
          return ret;
  }
  /* either there's a lock or we already know one
     fork is open
  */
  if (!(attrbits & ATTRBIT_DOPEN)) {
      if (ad_meta_fileno(ad) != -1) {
      	  adf = ad->ad_md;
      	  off = LOCK_DATA_WR;
      }
      else {
          adf = &ad->ad_data_fork;
          off = AD_FILELOCK_OPEN_WR;
      }
      /* two bytes cover the data-fork open-WR and open-RD slots */
      ret = testlock(adf, off, 2) > 0? ATTRBIT_DOPEN : 0;
  }

  if (!(attrbits & ATTRBIT_ROPEN)) {
      if (ad_meta_fileno(ad) != -1) {
      	  adf = ad->ad_md;
          /* two bytes cover the resource-fork open-WR/open-RD slots */
          off = LOCK_RSRC_WR;
          ret |= testlock(adf, off, 2) > 0? ATTRBIT_ROPEN : 0;
      }
  }

  return ret;
}
531
532/* -------------------------
533*/
/*
 * Take or release a TEMPORARY lock that is not recorded in the fork's
 * lock list.  On release (ADLOCK_CLR), any stored byte locks that
 * overlap the range are re-applied so permanent state is restored.
 * Returns 0 on success, -1 (with errno from fcntl or EACCES) on error.
 */
int ad_fcntl_tmplock(struct adouble *ad, const u_int32_t eid, const int locktype,
	             const off_t off, const off_t len, const int fork)
{
  struct flock lock;
  struct ad_fd *adf;
  int err;
  int type;

  lock.l_start = off;
  type = locktype;
  if (eid == ADEID_DFORK) {
    adf = &ad->ad_data_fork;
  } else {
    /* FIXME META */
    adf = &ad->ad_resource_fork;
    if (adf->adf_fd == -1) {
        /* there's no resource fork. return success */
        return 0;
    }
    /* if ADLOCK_FILELOCK we want a lock from offset 0
     * it's used when deleting a file:
     * in open we put read locks on meta datas
     * in delete a write locks on the whole file
     * so if the file is open by somebody else it fails
    */
    if (!(type & ADLOCK_FILELOCK))
        lock.l_start += ad_getentryoff(ad, eid);
  }

  /* read-only descriptors can only carry read locks (same downgrade
   * as in ad_fcntl_lock) */
  if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
      type = (type & ~ADLOCK_WR) | ADLOCK_RD;
  }

  lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
  lock.l_whence = SEEK_SET;
  lock.l_len = len;

  /* see if it's locked by another fork. */
  if (fork && adf_findxlock(adf, fork, ADLOCK_WR |
		    ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
		    lock.l_start, lock.l_len) > -1) {
    errno = EACCES;
    return -1;
  }

  /* okay, we might have ranges byte-locked. we need to make sure that
   * we restore the appropriate ranges once we're done. so, we check
   * for overlap on an unlock and relock.
   * XXX: in the future, all the byte locks will be sorted and contiguous.
   *      we just want to upgrade all the locks and then downgrade them
   *      here. */
  if (!adf->adf_excl) {
       err = set_lock(adf->adf_fd, F_SETLK, &lock);
  }
  else {
      err = 0;
  }
  /* after a successful unlock, re-apply stored locks the temporary
   * lock may have clobbered */
  if (!err && (lock.l_type == F_UNLCK))
    adf_relockrange(adf, adf->adf_fd, lock.l_start, len);

  return err;
}
596
597/* -------------------------
598   the fork is opened in Read Write, Deny Read, Deny Write mode
599   lock the whole file once
600*/
601int ad_excl_lock(struct adouble *ad, const u_int32_t eid)
602{
603  struct ad_fd *adf;
604  struct flock lock;
605  int    err;
606
607  lock.l_start = 0;
608  lock.l_type = F_WRLCK;
609  lock.l_whence = SEEK_SET;
610  lock.l_len = 0;
611
612  if (eid == ADEID_DFORK) {
613    adf = &ad->ad_data_fork;
614  } else {
615    adf = &ad->ad_resource_fork;
616    lock.l_start = ad_getentryoff(ad, eid);
617  }
618
619  err = set_lock(adf->adf_fd, F_SETLK, &lock);
620  if (!err)
621      adf->adf_excl = 1;
622  return err;
623}
624
625/* --------------------- */
626void ad_fcntl_unlock(struct adouble *ad, const int fork)
627{
628  if (ad_data_fileno(ad) != -1) {
629    adf_unlock(&ad->ad_data_fork, fork);
630  }
631  if (ad_reso_fileno(ad) != -1) {
632    adf_unlock(&ad->ad_resource_fork, fork);
633  }
634
635  if (ad->ad_flags != AD_VERSION1_SFM) {
636    return;
637  }
638  if (ad_meta_fileno(ad) != -1) {
639    adf_unlock(&ad->ad_metadata_fork, fork);
640  }
641
642}
643