/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT. */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static TDB_CONTEXT *tdb;

/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, struct lock_struct *pls)
{
	DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %u, ",
			i,
			(unsigned int)pls->context.smbpid,
			(unsigned int)pls->context.tid,
			(unsigned int)procid_to_pid(&pls->context.pid) ));

	DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
		(double)pls->start,
		(double)pls->size,
		pls->fnum,
		lock_type_name(pls->lock_type),
		lock_flav_name(pls->lock_flav) ));
}

/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

BOOL brl_same_context(const struct lock_context *ctx1,
			     const struct lock_context *ctx2)
{
	return (procid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smbpid == ctx2->smbpid) &&
		(ctx1->tid == ctx2->tid));
}

/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static BOOL brl_overlap(const struct lock_struct *lck1,
                        const struct lock_struct *lck2)
{
	/* this extra check is not redundant - it copes with locks
	   that extend beyond the end of 64 bit file space */
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start+lck2->size) ||
	    lck2->start >= (lck1->start+lck1->size)) {
		return False;
	}
	return True;
}
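
/* Illustrative note (editorial, not from the original code): the
   identical start/size check above matters when start + size wraps
   past 2^64. For example, two locks both with start 0xFFFFFFFFFFFFFFF0
   and size 0x20 wrap, so start >= (start + size) holds and the second
   pair of comparisons would wrongly report no overlap; the equality
   test still catches the overlap. */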

/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static BOOL brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}
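
/* Summary of brl_conflict (editorial aid, Windows flavour):
     pending vs. anything                        -> never conflicts
     read vs. read                               -> never conflicts
     incoming read on our own context and fnum   -> never conflicts
     anything else                               -> conflicts iff the
                                                    ranges overlap */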

/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static BOOL brl_conflict_posix(const struct lock_struct *lck1,
			 	const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* The contexts differ and at least one lock is a write -
	   conflict iff the ranges overlap. */
	return brl_overlap(lck1, lck2);
}

#if ZERO_ZERO
static BOOL brl_conflict1(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static BOOL brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
		return False;

	/* POSIX flavour locks never conflict here - this is only called
	   in the read/write path. */

	if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
		return False;

	/*
	 * Incoming WRITE locks conflict with existing READ locks even
	 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
	 */

	if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
		if (brl_same_context(&lck1->context, &lck2->context) &&
					lck1->fnum == lck2->fnum)
			return False;
	}

	return brl_overlap(lck1, lck2);
}
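
/* Illustrative example (editorial, mirrors LOCKTEST7): if we hold a
   READ lock on [100,110) and the same context/fnum then attempts a
   WRITE on [105,108), brl_conflict_other() reports a conflict even
   though both locks are ours - the same-context exemption above is
   deliberately skipped for an incoming WRITE over an existing READ. */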

/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static BOOL brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
		return True;
	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
		return True;
	return False;
}
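
/* Worked examples (editorial), writing ranges as [start, start+size):
   unlock [10,20) vs. pending [15,25) - the first test fires
   (10 <= 15 and 20 > 15); unlock [25,30) vs. pending [15,25) - the
   second test fires (25 >= 15 and 25 <= 25), i.e. a range that merely
   touches the end of a pending lock still wakes the waiter so it can
   re-check. */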

/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

static NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, BOOL blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
			lock->context.tid == fsp->last_lock_failure.context.tid &&
			lock->fnum == fsp->last_lock_failure.fnum &&
			lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}

/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(int read_only)
{
	if (tdb) {
		return;
	}
	tdb = tdb_open_log(lock_path("brlock.tdb"),
			lp_open_files_db_hash_size(),
			TDB_DEFAULT|(read_only?0x0:TDB_CLEAR_IF_FIRST),
			read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
	if (!tdb) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			lock_path("brlock.tdb")));
		return;
	}

	/* Activate the per-hashchain freelist */
	tdb_set_max_dead(tdb, 5);
}
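
/* Usage note (editorial): smbd is expected to call brl_init(False)
   once at startup, before the first brl_get_locks() call; tools that
   only inspect the lock database can pass True to open brlock.tdb
   read-only (which also skips TDB_CLEAR_IF_FIRST). */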

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(int read_only)
{
	if (!tdb) {
		return;
	}
	tdb_close(tdb);
}

#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
#endif

/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck,
			struct lock_struct *plock, BOOL blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			/* Remember who blocked us. */
			plock->context.smbpid = locks[i].context.smbpid;
			return brl_lock_failed(fsp,plock,blocking_lock);
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
				locks[i].size == 0) {
			break;
		}
#endif
	}

	/* We can grant the Windows lock. Now see if it needs to
	   be mapped into a lower level POSIX one, and if so, can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smbpid = 0xFFFFFFFF;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				return NT_STATUS_FILE_LOCK_CONFLICT;
			} else {
				/* Use the saved errno, not the (possibly
				   clobbered) global errno. */
				return map_nt_error_from_unix(errno_ret);
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
	if (!locks) {
		return NT_STATUS_NO_MEMORY;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = (void *)locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
}

/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,		/* Output array. */
						const struct lock_struct *ex,		/* existing lock. */
						const struct lock_struct *plock,	/* proposed lock. */
						BOOL *lock_was_added)
{
	BOOL lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different contexts - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                             +---------+
                                             | ex      |
                                             +---------+
                              +-------+
                              | plock |
                              +-------+
OR....
             +---------+
             |  ex     |
             +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
			(plock->start > (ex->start + ex->size))) {
		/* No overlap with this lock - copy existing. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {
		memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
		*lock_was_added = True;
		return 1;
	}

/*********************************************
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+
OR....
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   ex                  | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
				(ex->start <= plock->start + plock->size) &&
				(ex->start + ex->size > plock->start + plock->size) ) {

		*lock_was_added = True;

		/* If the lock types are the same, we merge, if different, we
		   add the new lock before the old. */

		if (lock_types_differ) {
			/* Add new. */
			memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Merge. */
			memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
			/* Set new start and size. */
			lck_arr[0].start = plock->start;
			lck_arr[0].size = (ex->start + ex->size) - plock->start;
			return 1;
		}
	}

/*********************************************
   +-----------------------+
   |  ex                   |
   +-----------------------+
           +---------------+
           |   plock       |
           +---------------+
OR....
   +-------+
   |  ex   |
   +-------+
           +---------------+
           |   plock       |
           +---------------+
BECOMES....
   +-------+---------------+
   | ex    |   plock       | - different lock types.
   +-------+---------------+

OR.... (merge)
   +-----------------------+
   | ex                    | - same lock type.
   +-----------------------+

**********************************************/

	if ( (ex->start < plock->start) &&
			(ex->start + ex->size >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {

		*lock_was_added = True;

		/* If the lock types are the same, we merge, if different, we
		   add the new lock after the old. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 2;
		} else {
			/* Merge. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = (plock->start + plock->size) - ex->start;
			return 1;
		}
	}

/*********************************************
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        ex                 | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {
		*lock_was_added = True;

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
			memcpy(&lck_arr[2], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[2].start = plock->start + plock->size;
			lck_arr[2].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 3;
		} else {
			/* Just eat plock. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge\n");
	/* Notreached. */
	abort();
	/* Keep some compilers happy. */
	return 0;
}
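
/* Worked example (editorial): same context, ex = READ on [0,100),
   plock = WRITE on [40,60). The final case above fires and returns
   3 entries: READ [0,40), WRITE [40,60), READ [60,100). With equal
   lock types it would instead return the untouched ex, "eating"
   plock entirely. */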

/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
			struct lock_struct *plock)
{
	unsigned int i, count;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	struct lock_struct *tp;
	BOOL lock_was_added = False;
	BOOL signal_pending_read = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = 0;
	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		/* If we have a pending read lock, a lock downgrade should
		   trigger a lock re-evaluation. */
		if (curr_lock->lock_type == PENDING_READ_LOCK &&
				brl_pending_overlap(plock, curr_lock)) {
			signal_pending_read = True;
		}

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smbpid = curr_lock->context.smbpid;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			/* POSIX conflict semantics are different - we can
			   never block ourselves, so any conflict here is
			   with another context. */
			if (brl_conflict_posix(curr_lock, plock)) {
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smbpid = curr_lock->context.smbpid;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/* Work out overlaps. */
			count += brlock_posix_split_merge(&tp[count], curr_lock, plock, &lock_was_added);
		}
	}

	if (!lock_was_added) {
		memcpy(&tp[count], plock, sizeof(struct lock_struct));
		count++;
	}

	/* We can grant the POSIX lock. Now see if it needs to
	   be mapped into a lower level system POSIX lock, and if
	   so, can we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smbpid = 0xFFFFFFFF;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				SAFE_FREE(tp);
				return NT_STATUS_FILE_LOCK_CONFLICT;
			} else {
				SAFE_FREE(tp);
				/* Use the saved errno, not the (possibly
				   clobbered) global errno. */
				return map_nt_error_from_unix(errno_ret);
			}
		}
	}

	/* Realloc so we don't leak entries per lock call. */
	tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}
	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	br_lck->lock_data = (void *)tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	if (signal_pending_read) {
		/* Send unlock messages to any pending read waiters that overlap. */
		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *pend_lock = &locks[i];

			/* Ignore non-pending locks. */
			if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
				continue;
			}

			if (pend_lock->lock_type == PENDING_READ_LOCK &&
					brl_pending_overlap(plock, pend_lock)) {
				DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid )));

				message_send_pid(pend_lock->context.pid,
						MSG_SMB_UNLOCK,
						NULL, 0, True);
			}
		}
	}

	return NT_STATUS_OK;
}

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct process_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		BOOL blocking_lock,
		uint32 *psmbpid)
{
	NTSTATUS ret;
	struct lock_struct lock;

#if !ZERO_ZERO
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		ret = brl_lock_windows(br_lck, &lock, blocking_lock);
	} else {
		ret = brl_lock_posix(br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmbpid) {
		*psmbpid = lock.context.smbpid;
	}
	return ret;
}
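
/* Usage sketch (editorial; fsp, smbpid, offset and count are
 * hypothetical caller values):
 *
 *	struct byte_range_lock *br_lck = brl_get_locks(NULL, fsp);
 *	uint32 blocker_smbpid;
 *	NTSTATUS status;
 *
 *	if (br_lck != NULL) {
 *		status = brl_lock(br_lck, smbpid, procid_self(),
 *				offset, count, WRITE_LOCK, WINDOWS_LOCK,
 *				False, &blocker_smbpid);
 *		TALLOC_FREE(br_lck);	<- destructor stores and unlocks
 *	}
 *
 * On failure, blocker_smbpid names the smbpid that blocked us, or
 * 0xFFFFFFFF if the blocker was an unknown system-level lock holder. */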

/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

static BOOL brl_unlock_windows(struct byte_range_lock *br_lck, const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
					lock->fnum == plock->fnum &&
					lock->lock_flav == WINDOWS_LOCK &&
					lock->start == plock->start &&
					lock->size == plock->size ) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	/* Actually delete the lock. */
	if (i < br_lck->num_locks - 1) {
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			message_send_pid(pend_lock->context.pid,
					MSG_SMB_UNLOCK,
					NULL, 0, True);
		}
	}

	return True;
}

/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static BOOL brl_unlock_posix(struct byte_range_lock *br_lck, const struct lock_struct *plock)
{
	unsigned int i, j, count;
	struct lock_struct *tp;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	BOOL overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		struct lock_struct tmp_lock[3];
		BOOL lock_was_added = False;
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (IS_PENDING_LOCK(lock->lock_type) ||
				!brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tmp_lock[0], &locks[i], plock, &lock_was_added);

		if (tmp_count == 1) {
			/* Either the locks didn't overlap, or the unlock completely
			   overlapped this lock. If it didn't overlap, then there's
			   no change in the locks. */
			if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
				SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
				/* No change in this lock. */
				memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
				count++;
			} else {
				SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
				overlap_found = True;
			}
			continue;
		} else if (tmp_count == 2) {
			/* The unlock overlapped an existing lock. Copy the truncated
			   lock into the lock array. */
			if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
				SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
				SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
				memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
				if (tmp_lock[0].size != locks[i].size) {
					overlap_found = True;
				}
			} else {
				SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
				SMB_ASSERT(tmp_lock[1].lock_type == locks[i].lock_type);
				memcpy(&tp[count], &tmp_lock[1], sizeof(struct lock_struct));
				if (tmp_lock[1].start != locks[i].start) {
					overlap_found = True;
				}
			}
			count++;
			continue;
		} else {
			/* tmp_count == 3 - (we split a lock range in two). */
			SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
			SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
			SMB_ASSERT(tmp_lock[2].lock_type == locks[i].lock_type);

			memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
			count++;
			memcpy(&tp[count], &tmp_lock[2], sizeof(struct lock_struct));
			count++;
			overlap_found = True;
			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */
			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		SAFE_FREE(tp);
		DEBUG(10,("brl_unlock_posix: no overlap - no change.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						plock->start,
						plock->size,
						&plock->context,
						tp,
						count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		SAFE_FREE(tp);
		tp = NULL;
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	locks = tp;
	br_lck->lock_data = (void *)tp;
	br_lck->modified = True;

	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			message_send_pid(pend_lock->context.pid,
					MSG_SMB_UNLOCK,
					NULL, 0, True);
		}
	}

	return True;
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

BOOL brl_unlock(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct process_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return brl_unlock_windows(br_lck, &lock);
	} else {
		return brl_unlock_posix(br_lck, &lock);
	}
}
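
/* Behaviour note with example (editorial): Windows-flavour unlocks
   must match an existing lock exactly (context, fnum, start, size),
   whereas a POSIX-flavour unlock of [40,60) against our own lock on
   [0,100) splits it into [0,40) and [60,100) via
   brlock_posix_split_merge(). */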

/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

BOOL brl_locktest(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct process_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav)
{
	BOOL ret = True;
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], &lock)) {
			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)start, (double)size, ret ? "locked" : "unlocked",
			fsp->fnum, fsp->fsp_name ));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}

/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		uint32 *psmbpid,
		struct process_id pid,
		br_off *pstart,
		br_off *psize,
		enum brl_type *plock_type,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smbpid = *psmbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		BOOL conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmbpid = exlock->context.smbpid;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		BOOL ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
			fsp->fnum, fsp->fsp_name ));

		if (ret) {
			/* Hmmm. No clue what to set smbpid to - use
			   0xFFFF as an obviously-invalid marker. */
			*psmbpid = 0xFFFF;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}

/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/

BOOL brl_lock_cancel(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct process_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	struct lock_context context;

	context.smbpid = smbpid;
	context.pid = pid;
	context.tid = br_lck->fsp->conn->cnum;

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* For pending locks we *always* care about the fnum. */
		if (brl_same_context(&lock->context, &context) &&
				lock->fnum == br_lck->fsp->fnum &&
				IS_PENDING_LOCK(lock->lock_type) &&
				lock->lock_flav == lock_flav &&
				lock->start == start &&
				lock->size == size) {
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* Didn't find it. */
		return False;
	}

	/* Found this particular pending lock - delete it. */
	if (i < br_lck->num_locks - 1) {
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;
	return True;
}

/****************************************************************************
 Remove any locks associated with an open file.
 If POSIX locking is in use and this process holds other Windows locks
 on the same dev/ino pair under a different tid or fnum, we must unlock
 each of our locks individually so the underlying system POSIX locks
 are released correctly.
****************************************************************************/

void brl_close_fnum(struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint16 tid = fsp->conn->cnum;
	int fnum = fsp->fnum;
	unsigned int i, j, dcount=0;
	int num_deleted_windows_locks = 0;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	struct process_id pid = procid_self();
	BOOL unlock_individually = False;

	if(lp_posix_locking(fsp->conn->params)) {

		/* Check if there are any Windows locks associated with this dev/ino
		   pair that are not this fnum. If so we need to call unlock on each
		   one in order to release the system POSIX locks correctly. */

		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *lock = &locks[i];

			if (!procid_equal(&lock->context.pid, &pid)) {
				continue;
			}

			if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
				continue; /* Ignore pending. */
			}

			if (lock->context.tid != tid || lock->fnum != fnum) {
				unlock_individually = True;
				break;
			}
		}

		if (unlock_individually) {
			struct lock_struct *locks_copy;
			unsigned int num_locks_copy;

			/* Copy the current lock array. */
			if (br_lck->num_locks) {
				locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
				if (!locks_copy) {
					smb_panic("brl_close_fnum: talloc fail.\n");
				}
			} else {
				locks_copy = NULL;
			}

			num_locks_copy = br_lck->num_locks;

			for (i=0; i < num_locks_copy; i++) {
				struct lock_struct *lock = &locks_copy[i];

				if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
						(lock->fnum == fnum)) {
					brl_unlock(br_lck,
						lock->context.smbpid,
						pid,
						lock->start,
						lock->size,
						lock->lock_flav);
				}
			}
			return;
		}
	}

	/* We can bulk delete - any POSIX locks will be removed when the fd closes. */

	/* Remove any existing locks for this fnum (or any fnum if they're POSIX). */

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		BOOL del_this_lock = False;

		if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
			if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
				del_this_lock = True;
				num_deleted_windows_locks++;
			} else if (lock->lock_flav == POSIX_LOCK) {
				del_this_lock = True;
			}
		}

		if (del_this_lock) {
			/* Send unlock messages to any pending waiters that overlap. */
			for (j=0; j < br_lck->num_locks; j++) {
				struct lock_struct *pend_lock = &locks[j];

				/* Ignore non-pending locks. */
				if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
					continue;
				}

				/* Optimisation - don't send to this fnum as we're
				   closing it. */
				if (pend_lock->context.tid == tid &&
				    procid_equal(&pend_lock->context.pid, &pid) &&
				    pend_lock->fnum == fnum) {
					continue;
				}

				/* We could send specific lock info here... */
				if (brl_pending_overlap(lock, pend_lock)) {
					message_send_pid(pend_lock->context.pid,
							MSG_SMB_UNLOCK,
							NULL, 0, True);
				}
			}

			/* found it - delete it */
			if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
				memmove(&locks[i], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
			}
			br_lck->num_locks--;
			br_lck->modified = True;
			i--;
			dcount++;
		}
	}

	if(lp_posix_locking(fsp->conn->params) && num_deleted_windows_locks) {
		/* Reduce the Windows lock POSIX reference count on this dev/ino pair. */
		reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
	}
}

/****************************************************************************
 Ensure this set of lock entries is valid.
****************************************************************************/

static BOOL validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
{
	unsigned int i;
	unsigned int num_valid_entries = 0;
	struct lock_struct *locks = *pplocks;

	for (i = 0; i < *pnum_entries; i++) {
		struct lock_struct *lock_data = &locks[i];
		if (!process_exists(lock_data->context.pid)) {
			/* This process no longer exists - mark this
			   entry as invalid by zeroing it. */
			ZERO_STRUCTP(lock_data);
		} else {
			num_valid_entries++;
		}
	}

	if (num_valid_entries != *pnum_entries) {
		struct lock_struct *new_lock_data = NULL;

		if (num_valid_entries) {
			new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
			if (!new_lock_data) {
				DEBUG(3, ("malloc fail\n"));
				return False;
			}

			num_valid_entries = 0;
			for (i = 0; i < *pnum_entries; i++) {
				struct lock_struct *lock_data = &locks[i];
				if (lock_data->context.smbpid &&
						lock_data->context.tid) {
					/* Valid (nonzero) entry - copy it. */
					memcpy(&new_lock_data[num_valid_entries],
						lock_data, sizeof(struct lock_struct));
					num_valid_entries++;
				}
			}
		}

		SAFE_FREE(*pplocks);
		*pplocks = new_lock_data;
		*pnum_entries = num_valid_entries;
	}

	return True;
}

/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
{
	struct lock_struct *locks;
	struct lock_key *key;
	unsigned int i;
	unsigned int num_locks = 0;
	unsigned int orig_num_locks = 0;

	BRLOCK_FN(traverse_callback) = (BRLOCK_FN_CAST())state;

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)memdup(dbuf.dptr, dbuf.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct lock_key *)kbuf.dptr;
	orig_num_locks = num_locks = dbuf.dsize/sizeof(*locks);

	/* Ensure the lock db is clean of entries from invalid processes. */

	if (!validate_lock_entries(&num_locks, &locks)) {
		SAFE_FREE(locks);
		return -1; /* Terminate traversal */
	}

	if (orig_num_locks != num_locks) {
		dbuf.dptr = (char *)locks;
		dbuf.dsize = num_locks * sizeof(*locks);

		if (dbuf.dsize) {
			tdb_store(ttdb, kbuf, dbuf, TDB_REPLACE);
		} else {
			tdb_delete(ttdb, kbuf);
		}
	}

	for ( i=0; i<num_locks; i++) {
		traverse_callback(key->device,
				  key->inode,
				  locks[i].context.pid,
				  locks[i].lock_type,
				  locks[i].lock_flav,
				  locks[i].start,
				  locks[i].size);
	}

	SAFE_FREE(locks);
	return 0;
}

/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(BRLOCK_FN(fn))
{
	if (!tdb) {
		return 0;
	}
	return tdb_traverse(tdb, traverse_fn, (void *)fn);
}
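
/* Usage sketch (editorial; print_brl is a hypothetical callback - any
 * function matching the BRLOCK_FN signature will do, as in
 * smbstatus-style reporting):
 *
 *	static void print_brl(SMB_DEV_T dev, SMB_INO_T ino,
 *			struct process_id pid, enum brl_type lock_type,
 *			enum brl_flavour lock_flav,
 *			br_off start, br_off size)
 *	{
 *		d_printf("%s %.0f:%.0f [%.0f..%.0f)\n",
 *			procid_str_static(&pid), (double)dev, (double)ino,
 *			(double)start, (double)(start + size));
 *	}
 *
 *	brl_forall(print_brl);
 */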

/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	TDB_DATA key;

	key.dptr = (char *)&br_lck->key;
	key.dsize = sizeof(struct lock_key);

	if (br_lck->read_only) {
		SMB_ASSERT(!br_lck->modified);
	}

	if (!br_lck->modified) {
		goto done;
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		if (tdb_delete(tdb, key) == -1) {
			smb_panic("Could not delete byte range lock entry\n");
		}
	} else {
		TDB_DATA data;
		data.dptr = (char *)br_lck->lock_data;
		data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

		if (tdb_store(tdb, key, data, TDB_REPLACE) == -1) {
			smb_panic("Could not store byte range lock entry\n");
		}
	}

 done:

	if (!br_lck->read_only) {
		tdb_chainunlock(tdb, key);
	}
	SAFE_FREE(br_lck->lock_data);
	return 0;
}

/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
					files_struct *fsp, BOOL read_only)
{
	TDB_DATA key;
	TDB_DATA data;
	struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);

	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;
	br_lck->num_locks = 0;
	br_lck->modified = False;
	memset(&br_lck->key, '\0', sizeof(struct lock_key));
	br_lck->key.device = fsp->dev;
	br_lck->key.inode = fsp->inode;

	key.dptr = (char *)&br_lck->key;
	key.dsize = sizeof(struct lock_key);

	if (!fsp->lockdb_clean) {
		/* We must be read/write to clean
		   the dead entries. */
		read_only = False;
	}

	if (read_only) {
		br_lck->read_only = True;
	} else {
		if (tdb_chainlock(tdb, key) != 0) {
			DEBUG(3, ("Could not lock byte range lock entry\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}
		br_lck->read_only = False;
	}

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	data = tdb_fetch(tdb, key);
	br_lck->lock_data = (void *)data.dptr;
	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

	if (!fsp->lockdb_clean) {
		int orig_num_locks = br_lck->num_locks;

		/* This is the first time we've accessed this. */
		/* Go through and ensure all entries exist - remove any that don't. */
		/* Makes the lockdb self cleaning at low cost. */

		struct lock_struct *locks =
			(struct lock_struct *)br_lck->lock_data;

		if (!validate_lock_entries(&br_lck->num_locks, &locks)) {
			SAFE_FREE(br_lck->lock_data);
			TALLOC_FREE(br_lck);
			return NULL;
		}

		/* Ensure invalid locks are cleaned up in the destructor. */
		if (orig_num_locks != br_lck->num_locks) {
			br_lck->modified = True;
		}

		/*
		 * validate_lock_entries might have changed locks. We can't
		 * use a direct pointer here because otherwise gcc warns
		 * about strict aliasing rules being violated.
		 */
		br_lck->lock_data = locks;

		/* Mark the lockdb as "clean" as seen from this open file. */
		fsp->lockdb_clean = True;
	}

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
		DEBUG(10,("brl_get_locks_internal: %u current locks on dev=%.0f, inode=%.0f\n",
			br_lck->num_locks,
			(double)fsp->dev, (double)fsp->inode ));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}
	return br_lck;
}

struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
					files_struct *fsp)
{
	return brl_get_locks_internal(mem_ctx, fsp, False);
}

struct byte_range_lock *brl_get_locks_readonly(TALLOC_CTX *mem_ctx,
					files_struct *fsp)
{
	return brl_get_locks_internal(mem_ctx, fsp, True);
}

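/* Lifetime note with sketch (editorial): brl_get_locks() chainlocks
 * the tdb record, so the returned handle must be short-lived; freeing
 * it runs byte_range_lock_destructor(), which stores any modifications
 * and drops the chainlock. A read-only handle skips the chainlock and
 * must never be modified:
 *
 *	struct byte_range_lock *br_lck = brl_get_locks_readonly(NULL, fsp);
 *	if (br_lck != NULL) {
 *		... inspect br_lck->num_locks / br_lck->lock_data ...
 *		TALLOC_FREE(br_lck);
 *	}
 */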