• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/ap/gpl/timemachine/netatalk-2.2.5/libatalk/adouble/
1/*
2 * Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
3 * All Rights Reserved. See COPYRIGHT for more information.
4 *
5 * Byte-range locks. This uses either whole-file flocks to fake byte
6 * locks or fcntl-based actual byte locks. Because fcntl locks are
7 * process-oriented, we need to keep around a list of file descriptors
8 * that refer to the same file. Currently, this doesn't serialize access
9 * to the locks. as a result, there's the potential for race conditions.
10 *
11 * TODO: fix the race when reading/writing.
12 *       keep a pool of both locks and reference counters around so that
13 *       we can save on mallocs. we should also use a tree to keep things
14 *       sorted.
15 */
16
17#ifdef HAVE_CONFIG_H
18#include "config.h"
19#endif /* HAVE_CONFIG_H */
20
21#include <atalk/adouble.h>
22#include <atalk/logger.h>
23
24#include <stdio.h>
25#include <stdlib.h>
26#include <errno.h>
27#include <inttypes.h>
28
29#include <string.h>
30
31#include "ad_private.h"
32
33/* translate between ADLOCK styles and specific locking mechanisms */
34#define XLATE_FLOCK(type) ((type) == ADLOCK_RD ? LOCK_SH : \
35((type) == ADLOCK_WR ? LOCK_EX : \
36 ((type) == ADLOCK_CLR ? LOCK_UN : -1)))
37
38#ifdef DISABLE_LOCKING
39#define fcntl(a, b, c ) (0)
40#endif
41
42/* ----------------------- */
static int set_lock(int fd, int cmd,  struct flock *lock)
{
  /* fd == -2 marks a symlink: there is nothing to lock, so report
   * success; an F_GETLK probe reports the range as unlocked. */
  if (fd != -2)
      return fcntl(fd, cmd, lock);

  if (cmd == F_GETLK)
      lock->l_type = F_UNLCK;
  return 0;
}
53
54/* ----------------------- */
55static int XLATE_FCNTL_LOCK(int type)
56{
57    switch(type) {
58    case ADLOCK_RD:
59        return F_RDLCK;
60    case ADLOCK_WR:
61         return F_WRLCK;
62    case ADLOCK_CLR:
63         return F_UNLCK;
64    }
65    return -1;
66}
67
68/* ----------------------- */
/* Do ranges [a, a+alen) and [b, b+blen) intersect?  A zero length
 * means "from the start offset to end of file", so a zero-length
 * range overlaps anything at or beyond its start offset. */
static int OVERLAP(off_t a, off_t alen, off_t b, off_t blen)
{
    if (!alen && a <= b)
        return 1;
    if (!blen && b <= a)
        return 1;
    /* both finite: they intersect iff each starts before the other ends */
    return (a + alen > b) && (b + blen > a);
}
75
76/* allocation for lock regions. we allocate aggressively and shrink
77 * only in large chunks. */
78#define ARRAY_BLOCK_SIZE 10
79#define ARRAY_FREE_DELTA 100
80
81/* remove a lock and compact space if necessary */
/* remove a lock and compact space if necessary */
static void adf_freelock(struct ad_fd *ad, const int i)
{
    adf_lock_t *lock = ad->adf_lock + i;

    /* drop one reference; the last holder frees the counter and
     * releases the kernel lock (skipped when the whole file is held
     * exclusively, since no per-range fcntl lock was taken then) */
    if (--(*lock->refcount) < 1) {
	free(lock->refcount);
	if (!ad->adf_excl) {
	    lock->lock.l_type = F_UNLCK;
	    set_lock(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
	}
    }

    ad->adf_lockcount--;

    /* move another lock into the empty space: the (former) last array
     * element overwrites freed slot i, keeping the array dense */
    if (i < ad->adf_lockcount) {
        memcpy(lock, lock + ad->adf_lockcount - i, sizeof(adf_lock_t));
    }

    /* free extra cruft if we go past a boundary. we always want to
     * keep at least some stuff around for allocations. this wastes
     * a bit of space to save time on reallocations. */
    if ((ad->adf_lockmax > ARRAY_FREE_DELTA) &&
	(ad->adf_lockcount + ARRAY_FREE_DELTA < ad->adf_lockmax)) {
	    struct adf_lock_t *tmp;

	    /* shrink in place; on realloc failure just keep the old,
	     * larger array */
	    tmp = (struct adf_lock_t *)
		    realloc(ad->adf_lock, sizeof(adf_lock_t)*
			    (ad->adf_lockcount + ARRAY_FREE_DELTA));
	    if (tmp) {
		ad->adf_lock = tmp;
		ad->adf_lockmax = ad->adf_lockcount + ARRAY_FREE_DELTA;
	    }
    }
}
117
118
119/* this needs to deal with the following cases:
120 * 1) fork is the only user of the lock
121 * 2) fork shares a read lock with another open fork
122 *
123 * i converted to using arrays of locks. everytime a lock
124 * gets removed, we shift all of the locks down.
125 */
126static void adf_unlock(struct ad_fd *ad, const int fork)
127{
128    adf_lock_t *lock = ad->adf_lock;
129    int i;
130
131    for (i = 0; i < ad->adf_lockcount; i++) {
132
133      if (lock[i].user == fork) {
134	/* we're really going to delete this lock. note: read locks
135           are the only ones that allow refcounts > 1 */
136	 adf_freelock(ad, i);
137	 i--; /* we shifted things down, so we need to backtrack */
138	 /* unlikely but realloc may have change adf_lock */
139	 lock = ad->adf_lock;
140      }
141    }
142}
143
144/* relock any byte lock that overlaps off/len. unlock everything
145 * else. */
146static void adf_relockrange(struct ad_fd *ad, int fd,
147				       const off_t off, const off_t len)
148{
149    adf_lock_t *lock = ad->adf_lock;
150    int i;
151
152    if (!ad->adf_excl) for (i = 0; i < ad->adf_lockcount; i++) {
153      if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
154	set_lock(fd, F_SETLK, &lock[i].lock);
155    }
156}
157
158
159/* find a byte lock that overlaps off/len for a particular open fork */
160static int adf_findlock(struct ad_fd *ad,
161				   const int fork, const int type,
162				   const off_t off,
163				   const off_t len)
164{
165  adf_lock_t *lock = ad->adf_lock;
166  int i;
167
168  for (i = 0; i < ad->adf_lockcount; i++) {
169    if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
170	((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
171	(lock[i].user == fork) &&
172	OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
173      return i;
174    }
175  }
176
177  return -1;
178}
179
180
181/* search other fork lock lists */
182static int adf_findxlock(struct ad_fd *ad,
183				     const int fork, const int type,
184				     const off_t off,
185				     const off_t len)
186{
187  adf_lock_t *lock = ad->adf_lock;
188  int i;
189
190  for (i = 0; i < ad->adf_lockcount; i++) {
191    if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
192	 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
193	(lock[i].user != fork) &&
194	OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
195	    return i;
196  }
197  return -1;
198}
199
200/* okay, this needs to do the following:
201 * 1) check current list of locks. error on conflict.
202 * 2) apply the lock. error on conflict with another process.
203 * 3) update the list of locks this file has.
204 *
205 * NOTE: this treats synchronization locks a little differently. we
206 *       do the following things for those:
207 *       1) if the header file exists, all the locks go in the beginning
208 *          of that.
209 *       2) if the header file doesn't exist, we stick the locks
210 *          in the locations specified by AD_FILELOCK_RD/WR.
211 */
212#define LOCK_DATA_WR (0)
213#define LOCK_DATA_RD (1)
214#define LOCK_RSRC_WR (2)
215#define LOCK_RSRC_RD (3)
216
217#define LOCK_RSRC_DRD (4)
218#define LOCK_RSRC_DWR (5)
219#define LOCK_DATA_DRD (6)
220#define LOCK_DATA_DWR (7)
221
222#define LOCK_RSRC_NONE (8)
223#define LOCK_DATA_NONE (9)
224
225/* --------------
226	translate a data fork lock to an offset
227*/
228
229static off_t df2off(off_t off)
230{
231    off_t start = off;
232	if (off == AD_FILELOCK_OPEN_WR)
233		start = LOCK_DATA_WR;
234	else if (off == AD_FILELOCK_OPEN_RD)
235		start = LOCK_DATA_RD;
236    else if (off == AD_FILELOCK_DENY_RD)
237		start = LOCK_DATA_DRD;
238	else if (off == AD_FILELOCK_DENY_WR)
239		start = LOCK_DATA_DWR;
240	else if (off == AD_FILELOCK_OPEN_NONE)
241		start = LOCK_DATA_NONE;
242	return start;
243}
244
245/* --------------
246	translate a resource fork lock to an offset
247*/
248
249static off_t hf2off(off_t off)
250{
251    off_t start = off;
252	if (off == AD_FILELOCK_OPEN_WR)
253		start = LOCK_RSRC_WR;
254	else if (off == AD_FILELOCK_OPEN_RD)
255		start = LOCK_RSRC_RD;
256    else if (off == AD_FILELOCK_DENY_RD)
257		start = LOCK_RSRC_DRD;
258	else if (off == AD_FILELOCK_DENY_WR)
259		start = LOCK_RSRC_DWR;
260	else if (off == AD_FILELOCK_OPEN_NONE)
261		start = LOCK_RSRC_NONE;
262	return start;
263}
264
265/* ------------------ */
266int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int locktype,
267		  const off_t off, const off_t len, const int fork)
268{
269  struct flock lock;
270  struct ad_fd *adf;
271  adf_lock_t *adflock;
272  int oldlock;
273  int i;
274  int type;
275
276  lock.l_start = off;
277  type = locktype;
278  if (eid == ADEID_DFORK) {
279    adf = &ad->ad_data_fork;
280    if ((type & ADLOCK_FILELOCK)) {
281        if (ad_meta_fileno(ad) != -1) { /* META */
282            adf = ad->ad_md;
283            lock.l_start = df2off(off);
284        }
285    }
286  } else { /* rfork */
287    if (ad_meta_fileno(ad) == -1 || ad_reso_fileno(ad) == -1) {
288        /* there's no meta data. return a lock error
289         * otherwise if a second process is able to create it
290         * locks are a mess.
291         */
292        errno = EACCES;
293        return -1;
294    }
295    if (type & ADLOCK_FILELOCK) {
296      adf = ad->ad_md;			/* either resource or meta data (set in ad_open) */
297      lock.l_start = hf2off(off);
298    }
299    else {
300      /* we really want the resource fork it's a byte lock */
301      adf = &ad->ad_resource_fork;
302      lock.l_start += ad_getentryoff(ad, eid);
303    }
304  }
305  /* NOTE: we can't write lock a read-only file. on those, we just
306    * make sure that we have a read lock set. that way, we at least prevent
307    * someone else from really setting a deny read/write on the file.
308    */
309  if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
310      type = (type & ~ADLOCK_WR) | ADLOCK_RD;
311  }
312
313  lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
314  lock.l_whence = SEEK_SET;
315  lock.l_len = len;
316
317  /* byte_lock(len=-1) lock whole file */
318  if (len == BYTELOCK_MAX) {
319      lock.l_len -= lock.l_start; /* otherwise  EOVERFLOW error */
320  }
321
322  /* see if it's locked by another fork.
323   * NOTE: this guarantees that any existing locks must be at most
324   * read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
325   * guaranteed to be ORable. */
326  if (adf_findxlock(adf, fork, ADLOCK_WR |
327		    ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
328		    lock.l_start, lock.l_len) > -1) {
329    errno = EACCES;
330    return -1;
331  }
332
333  /* look for any existing lock that we may have */
334  i = adf_findlock(adf, fork, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
335  adflock = (i < 0) ? NULL : adf->adf_lock + i;
336
337  /* here's what we check for:
338     1) we're trying to re-lock a lock, but we didn't specify an update.
339     2) we're trying to free only part of a lock.
340     3) we're trying to free a non-existent lock. */
341  if ( (!adflock && (lock.l_type == F_UNLCK))
342       ||
343       (adflock
344        && !(type & ADLOCK_UPGRADE)
345        && ((lock.l_type != F_UNLCK)
346            || (adflock->lock.l_start != lock.l_start)
347            || (adflock->lock.l_len != lock.l_len) ))
348      ) {
349      errno = EINVAL;
350      return -1;
351  }
352
353
354  /* now, update our list of locks */
355  /* clear the lock */
356  if (lock.l_type == F_UNLCK) {
357    adf_freelock(adf, i);
358    return 0;
359  }
360
361  /* attempt to lock the file. */
362  if (!adf->adf_excl && set_lock(adf->adf_fd, F_SETLK, &lock) < 0)
363    return -1;
364
365  /* we upgraded this lock. */
366  if (adflock && (type & ADLOCK_UPGRADE)) {
367    memcpy(&adflock->lock, &lock, sizeof(lock));
368    return 0;
369  }
370
371  /* it wasn't an upgrade */
372  oldlock = -1;
373  if (lock.l_type == F_RDLCK) {
374    oldlock = adf_findxlock(adf, fork, ADLOCK_RD, lock.l_start, lock.l_len);
375  }
376
377  /* no more space. this will also happen if lockmax == lockcount == 0 */
378  if (adf->adf_lockmax == adf->adf_lockcount) {
379    adf_lock_t *tmp = (adf_lock_t *)
380	    realloc(adf->adf_lock, sizeof(adf_lock_t)*
381		    (adf->adf_lockmax + ARRAY_BLOCK_SIZE));
382    if (!tmp)
383      goto fcntl_lock_err;
384    adf->adf_lock = tmp;
385    adf->adf_lockmax += ARRAY_BLOCK_SIZE;
386  }
387  adflock = adf->adf_lock + adf->adf_lockcount;
388
389  /* fill in fields */
390  memcpy(&adflock->lock, &lock, sizeof(lock));
391  adflock->user = fork;
392  if (oldlock > -1) {
393    adflock->refcount = (adf->adf_lock + oldlock)->refcount;
394  } else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
395    goto fcntl_lock_err;
396  }
397
398  (*adflock->refcount)++;
399  adf->adf_lockcount++;
400  return 0;
401
402fcntl_lock_err:
403  lock.l_type = F_UNLCK;
404  if (!adf->adf_excl) set_lock(adf->adf_fd, F_SETLK, &lock);
405  return -1;
406}
407
408/* -------------------------
409   we are using lock as tristate variable
410
411   we have a lock ==> 1
412   no             ==> 0
413   error          ==> -1
414
415*/
/* Check whether the byte range off/len is locked, either by ourselves
 * (tracked in adf->adf_lock) or by another process (probed via
 * F_GETLK).  Returns 1 locked, 0 not locked, -1 on error. */
static int testlock(struct ad_fd *adf, off_t off, off_t len)
{
  struct flock lock;
  adf_lock_t *plock;
  int i;

  lock.l_start = off;

  plock = adf->adf_lock;
  lock.l_whence = SEEK_SET;
  lock.l_len = len;

  /* Do we have a lock? */
  /* NOTE(review): only the first byte of the range is tested against
   * our own lock list here (length 1, not len) -- presumably enough
   * for the offsets the callers pass; confirm before relying on it. */
  for (i = 0; i < adf->adf_lockcount; i++) {
    if (OVERLAP(lock.l_start, 1, plock[i].lock.l_start, plock[i].lock.l_len))
        return 1;   /* */
  }
  /* Does another process have a lock?
  */
  /* probe with the strongest type this descriptor allows: a read-only
   * descriptor can only ask about conflicting write locks */
  lock.l_type = (adf->adf_flags & O_RDWR) ?F_WRLCK : F_RDLCK;

  if (set_lock(adf->adf_fd, F_GETLK, &lock) < 0) {
      /* is that kind of error possible ?*/
      return (errno == EACCES || errno == EAGAIN)?1:-1;
  }

  /* F_GETLK rewrote l_type: F_UNLCK means nobody holds a conflict */
  if (lock.l_type == F_UNLCK) {
      return 0;
  }
  return 1;
}
447
448/* --------------- */
449int ad_testlock(struct adouble *ad, int eid, const off_t off)
450{
451  struct ad_fd *adf;
452  off_t      lock_offset;
453
454  lock_offset = off;
455  if (eid == ADEID_DFORK) {
456    adf = &ad->ad_data_fork;
457    if (ad_meta_fileno(ad) != -1) {
458      	adf = ad->ad_md;
459    	lock_offset = df2off(off);
460    }
461  }
462  else { /* rfork */
463    if (ad_meta_fileno(ad) == -1) {
464        /* there's no resource fork. return no lock */
465        return 0;
466    }
467    adf = ad->ad_md;
468    lock_offset = hf2off(off);
469  }
470  return testlock(adf, lock_offset, 1);
471}
472
473/* -------------------------
474   return if a file is open by another process.
475   Optimized for the common case:
476   - there's no locks held by another process (clients)
477   - or we already know the answer and don't need to test.
478*/
/* Report which forks are open by another process as ATTRBIT_DOPEN /
 * ATTRBIT_ROPEN flags.  'attrbits' carries the bits already known to
 * be set, so those probes are skipped. */
u_int16_t ad_openforks(struct adouble *ad, u_int16_t attrbits)
{
  u_int16_t ret = 0;
  struct ad_fd *adf;
  off_t off;

  /* fast path: probe all relevant open-lock bytes in one request */
  if (!(attrbits & (ATTRBIT_DOPEN | ATTRBIT_ROPEN))) {
      off_t len;
      /* XXX know the locks layout:
         AD_FILELOCK_OPEN_WR is first
         and use it for merging requests
      */
      if (ad_meta_fileno(ad) != -1) {
          /* there's a resource fork test the four bytes for
           * data RW/RD and fork RW/RD locks in one request
          */
      	  adf = ad->ad_md;
      	  off = LOCK_DATA_WR;
      	  len = 4;
      }
      else {
          /* no resource fork, only data RD/RW may exist */
          adf = &ad->ad_data_fork;
          off = AD_FILELOCK_OPEN_WR;
          len = 2;
      }
      /* no lock anywhere in the probed range -> nothing is open */
      if (!testlock(adf, off, len))
          return ret;
  }
  /* either there's a lock or we already know one
     fork is open
  */
  if (!(attrbits & ATTRBIT_DOPEN)) {
      /* test the two data-fork open bytes (WR, then RD) */
      if (ad_meta_fileno(ad) != -1) {
      	  adf = ad->ad_md;
      	  off = LOCK_DATA_WR;
      }
      else {
          adf = &ad->ad_data_fork;
          off = AD_FILELOCK_OPEN_WR;
      }
      ret = testlock(adf, off, 2) > 0? ATTRBIT_DOPEN : 0;
  }

  if (!(attrbits & ATTRBIT_ROPEN)) {
      /* resource-fork open bytes exist only when there is meta data */
      if (ad_meta_fileno(ad) != -1) {
      	  adf = ad->ad_md;
          off = LOCK_RSRC_WR;
          ret |= testlock(adf, off, 2) > 0? ATTRBIT_ROPEN : 0;
      }
  }

  return ret;
}
533
534/* -------------------------
535*/
/* Take or release a temporary lock that is NOT recorded in the
 * per-fork lock list; after a temporary unlock, tracked byte locks
 * overlapping the released range are re-applied.  Returns 0 on
 * success, -1 with errno set on failure. */
int ad_fcntl_tmplock(struct adouble *ad, const u_int32_t eid, const int locktype,
	             const off_t off, const off_t len, const int fork)
{
  struct flock lock;
  struct ad_fd *adf;
  int err;
  int type;

  lock.l_start = off;
  type = locktype;
  if (eid == ADEID_DFORK) {
    adf = &ad->ad_data_fork;
  } else {
    /* FIXME META */
    adf = &ad->ad_resource_fork;
    if (adf->adf_fd == -1) {
        /* there's no resource fork. return success */
        return 0;
    }
    /* if ADLOCK_FILELOCK we want a lock from offset 0
     * it's used when deleting a file:
     * in open we put read locks on meta datas
     * in delete a write locks on the whole file
     * so if the file is open by somebody else it fails
    */
    if (!(type & ADLOCK_FILELOCK))
        lock.l_start += ad_getentryoff(ad, eid);
  }

  /* a read-only descriptor cannot take write locks; degrade to read */
  if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
      type = (type & ~ADLOCK_WR) | ADLOCK_RD;
  }

  lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
  lock.l_whence = SEEK_SET;
  lock.l_len = len;

  /* see if it's locked by another fork. */
  /* NOTE(review): fork == 0 appears to skip this cross-fork conflict
   * check entirely -- confirm the callers' intent before relying on it */
  if (fork && adf_findxlock(adf, fork, ADLOCK_WR |
		    ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
		    lock.l_start, lock.l_len) > -1) {
    errno = EACCES;
    return -1;
  }

  /* okay, we might have ranges byte-locked. we need to make sure that
   * we restore the appropriate ranges once we're done. so, we check
   * for overlap on an unlock and relock.
   * XXX: in the future, all the byte locks will be sorted and contiguous.
   *      we just want to upgrade all the locks and then downgrade them
   *      here. */
  if (!adf->adf_excl) {
       err = set_lock(adf->adf_fd, F_SETLK, &lock);
  }
  else {
      err = 0;
  }
  /* an unlock may have released ranges we still track: put them back */
  if (!err && (lock.l_type == F_UNLCK))
    adf_relockrange(adf, adf->adf_fd, lock.l_start, len);

  return err;
}
598
599/* -------------------------
600   the fork is opened in Read Write, Deny Read, Deny Write mode
601   lock the whole file once
602*/
603int ad_excl_lock(struct adouble *ad, const u_int32_t eid)
604{
605  struct ad_fd *adf;
606  struct flock lock;
607  int    err;
608
609  lock.l_start = 0;
610  lock.l_type = F_WRLCK;
611  lock.l_whence = SEEK_SET;
612  lock.l_len = 0;
613
614  if (eid == ADEID_DFORK) {
615    adf = &ad->ad_data_fork;
616  } else {
617    adf = &ad->ad_resource_fork;
618    lock.l_start = ad_getentryoff(ad, eid);
619  }
620
621  err = set_lock(adf->adf_fd, F_SETLK, &lock);
622  if (!err)
623      adf->adf_excl = 1;
624  return err;
625}
626
627/* --------------------- */
628void ad_fcntl_unlock(struct adouble *ad, const int fork)
629{
630  if (ad_data_fileno(ad) != -1) {
631    adf_unlock(&ad->ad_data_fork, fork);
632  }
633  if (ad_reso_fileno(ad) != -1) {
634    adf_unlock(&ad->ad_resource_fork, fork);
635  }
636
637  if (ad->ad_flags != AD_VERSION1_SFM) {
638    return;
639  }
640  if (ad_meta_fileno(ad) != -1) {
641    adf_unlock(&ad->ad_metadata_fork, fork);
642  }
643
644}
645