/*
   Unix SMB/CIFS implementation.
   Samba database functions
   Copyright (C) Anton Blanchard                   2001

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if HAVE_CONFIG_H
#include <config.h>
#endif

#if STANDALONE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include <time.h>
#include <signal.h>
#include "tdb.h"
#include "spinlock.h"

#define DEBUG
#else
#include "includes.h"
#endif

#ifdef USE_SPINLOCKS

/*
 * ARCH SPECIFIC
 */

#if defined(SPARC_SPINLOCKS)

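/* ldstub atomically loads the addressed byte and stores 0xff back into
 * it, so a zero result means the lock was free and we now hold it. */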
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	asm volatile("ldstub    [%1], %0"
		: "=r" (result)
		: "r" (lock)
		: "memory");

	return (result == 0) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#elif defined(POWERPC_SPINLOCKS)

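/* lwarx loads a word and places a reservation on its address; the
 * matching stwcx. stores only if the reservation is still intact, which
 * makes the load/test/store sequence below atomic.  The isync on the
 * success path stops later loads being speculated past the lock. */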
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	li		%0,0\n\
	bne-		2f\n\
	li		%0,1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r"(result)
	: "r"(lock)
	: "cr0", "memory");

	return (result == 1) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("eieio":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#elif defined(INTEL_SPINLOCKS)

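/* Note the inverted convention on this port: 1 means unlocked, 0 means
 * locked.  xchgl atomically swaps 0 into the lock word, so a positive
 * old value means the lock was free and we have just claimed it. */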
static inline int __spin_trylock(spinlock_t *lock)
{
	int oldval;

	asm volatile("xchgl %0,%1"
		: "=r" (oldval), "=m" (*lock)
		: "0" (0)
		: "memory");

	return oldval > 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 1;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 1;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 1);
}

#elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)

/* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
 * sync(3) for the details of the intrinsic operations.
 *
 * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
 */

#if defined(STANDALONE)

/* MIPSPro 7.3 has "__inline" as an extension, but not "inline". */
#define inline __inline

#endif /* STANDALONE */

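/* Per sync(3): __lock_test_and_set atomically stores its second
 * argument and returns the previous value; __lock_release stores zero
 * with release semantics. */
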
/* Returns 0 if the lock is acquired, EBUSY otherwise. */
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int val;
        val = __lock_test_and_set(lock, 1);
        return val == 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        __lock_release(lock);
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        __lock_release(lock);
}

/* Returns 1 if the lock is held, 0 otherwise. */
static inline int __spin_is_locked(spinlock_t *lock)
{
        unsigned int val;
        val = __add_and_fetch(lock, 0);
        return val;
}

#elif defined(MIPS_SPINLOCKS)

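/* Classic MIPS load-linked/store-conditional: ll loads a word and opens
 * an atomic sequence; the matching sc performs its store (and returns
 * non-zero) only if nothing else has touched the word in between. */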
static inline unsigned int load_linked(unsigned long addr)
{
	unsigned int res;

	__asm__ __volatile__("ll\t%0,(%1)"
		: "=r" (res)
		: "r" (addr));

	return res;
}

static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
	unsigned int res;

	__asm__ __volatile__("sc\t%0,(%2)"
		: "=r" (res)
		: "0" (value), "r" (addr));
	return res;
}

static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int mw;

	do {
		mw = load_linked((unsigned long)lock);
		if (mw)
			return EBUSY;
	} while (!store_conditional((unsigned long)lock, 1));

	asm volatile("":::"memory");

	return 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#else
#error Need to implement spinlock code in spinlock.c
#endif

/*
 * OS SPECIFIC
 */

static void yield_cpu(void)
{
#ifdef USE_SCHED_YIELD
	sched_yield();
#else
	struct timespec tm;

	/* Linux will busy loop for delays < 2ms on real time tasks,
	   so ask for just over 2ms to guarantee an actual sleep */
	tm.tv_sec = 0;
	tm.tv_nsec = 2000000L + 1;
	nanosleep(&tm, NULL);
#endif
}

static int this_is_smp(void)
{
#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
	return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
#else
	return 0;
#endif
}

/*
 * GENERIC
 */

static int smp_machine = 0;

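/* Spin strategy: on an SMP machine it is worth busy-waiting for a few
 * iterations (MAX_BUSY_LOOPS) in the hope that the holder, running on
 * another CPU, drops the lock; on a uniprocessor spinning can never
 * succeed, so yield the CPU immediately. */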
static inline void __spin_lock(spinlock_t *lock)
{
	int ntries = 0;

	while(__spin_trylock(lock)) {
		while(__spin_is_locked(lock)) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

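/* Reader/writer protocol: each tdb_rwlock_t pairs a spinlock with a
 * count.  A reader briefly takes the spinlock to bump the count; a
 * writer sets the RWLOCK_BIAS bit, but only once the count has dropped
 * to zero.  Readers back off while the bias bit is set, and a writer
 * waits for existing readers to drain. */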
static void __read_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (!(rwlock->count & RWLOCK_BIAS)) {
			rwlock->count++;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		while(rwlock->count & RWLOCK_BIAS) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

static void __write_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (rwlock->count == 0) {
			rwlock->count |= RWLOCK_BIAS;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		while(rwlock->count != 0) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

static void __write_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

#ifdef DEBUG
	if (!(rwlock->count & RWLOCK_BIAS))
		fprintf(stderr, "bug: write_unlock\n");
#endif

	rwlock->count &= ~RWLOCK_BIAS;
	__spin_unlock(&rwlock->lock);
}

static void __read_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

#ifdef DEBUG
	if (!rwlock->count)
		fprintf(stderr, "bug: read_unlock\n");

	if (rwlock->count & RWLOCK_BIAS)
		fprintf(stderr, "bug: read_unlock (write locked)\n");
#endif

	rwlock->count--;
	__spin_unlock(&rwlock->lock);
}

/* TDB SPECIFIC */

/* lock a list in the database. list -1 is the alloc list */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_lock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_lock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}
	return 0;
}

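/* An illustrative caller (a sketch, not code from this file): guard a
 * read of hash chain 0 by pairing lock and unlock with the same rw_type:
 *
 *	if (tdb_spinlock(tdb, 0, F_RDLCK) == 0) {
 *		... traverse chain 0 ...
 *		tdb_spinunlock(tdb, 0, F_RDLCK);
 *	}
 *
 * F_RDLCK and F_WRLCK come from fcntl.h, mirroring the fcntl-based
 * locking this spinlock path replaces. */
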
/* unlock the database. */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_unlock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_unlock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}

	return 0;
}

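/* hash_size+1 locks are created because list -1 (the alloc list) maps
 * to slot 0 under the list+1 indexing used above. */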
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
	unsigned size, i;
	tdb_rwlock_t *rwlocks;

	size = TDB_SPINLOCK_SIZE(hash_size);
	rwlocks = malloc(size);
	if (!rwlocks)
		return -1;

	for(i = 0; i < hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}

	/* Write it out (appending to end) */
	if (write(fd, rwlocks, size) != size) {
		free(rwlocks);
		return -1;
	}
	smp_machine = this_is_smp();
	free(rwlocks);
	return 0;
}

int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_rwlock_t *rwlocks;
	unsigned i;

	if (tdb->header.rwlocks == 0) return 0;
	if (!tdb->map_ptr) return -1;

	/* We're mmapped here */
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
	for(i = 0; i < tdb->header.hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}
	return 0;
}
#else
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }

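/* The pointer arithmetic below is a hand-rolled offsetof(): it finds
 * where the rwlocks field lives inside the header so that just that
 * field can be rewritten on disk. */
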
/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
				- (char *)&tdb->header);

	tdb->header.rwlocks = 0;
	if (lseek(tdb->fd, off, SEEK_SET) != off
	    || write(tdb->fd, (void *)&tdb->header.rwlocks,
		     sizeof(tdb->header.rwlocks))
	    != sizeof(tdb->header.rwlocks))
		return -1;
	return 0;
}
#endif
