/*****************************************************************************
 *
 * cache.c
 *
 * Description:  Implements a credential caching layer to ease the load
 *               on the authentication mechanisms.
 *
 * Copyright (C) 2003 Jeremy Rumpf
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS''. ANY EXPRESS OR IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL JEREMY RUMPF OR ANY CONTRIBUTOR TO THIS SOFTWARE BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Jeremy Rumpf
 * jrumpf@heavyload.net
 *
 *****************************************************************************/

/****************************************
 * includes
 *****************************************/
#include "saslauthd.h"

#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <errno.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <limits.h>
#include <time.h>

#include "cache.h"
#include "utils.h"
#include "globals.h"
#include "md5global.h"
#include "saslauthd_md5.h"

/****************************************
 * module globals
 *****************************************/
static  struct mm_ctl	mm;
static  struct lock_ctl	lock;
static  struct bucket	*table = NULL;
static  struct stats	*table_stats = NULL;
static  unsigned int	table_size = 0;
static  unsigned int	table_timeout = 0;
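
/*
 * Note: table points into the shared mmapped region set up by cache_init()
 * and is addressed as table_size slots of CACHE_MAX_BUCKETS_PER buckets
 * each; table_stats lives in the same region just ahead of the buckets.
 */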

/****************************************
 * flags               global from saslauthd-main.c
 * run_path            global from saslauthd-main.c
 * tx_rec()            function from utils.c
 * logger()            function from utils.c
 *****************************************/

/*************************************************************
 * The initialization function. This function will set up
 * the hash table's memory region, initialize the table, etc.
 **************************************************************/
int cache_init(void) {
	int		bytes;
	char		cache_magic[64];
	void		*base;

	if (!(flags & CACHE_ENABLED))
		return 0;

	memset(cache_magic, 0, sizeof(cache_magic));
	strlcpy(cache_magic, CACHE_CACHE_MAGIC, sizeof(cache_magic));

	/**************************************************************
	 * Compute the size of the hash table. This and a stats
	 * struct will make up the memory region.
	 **************************************************************/

	if (table_size == 0)
		table_size = CACHE_DEFAULT_TABLE_SIZE;

	bytes = (table_size * CACHE_MAX_BUCKETS_PER * (int)sizeof(struct bucket))
		+ (int)sizeof(struct stats) + 256;


	if ((base = cache_alloc_mm(bytes)) == NULL)
		return -1;

	if (table_timeout == 0)
		table_timeout = CACHE_DEFAULT_TIMEOUT;

	if (flags & VERBOSE) {
		logger(L_DEBUG, L_FUNC, "bucket size: %d bytes",
		       (int)sizeof(struct bucket));
		logger(L_DEBUG, L_FUNC, "stats size : %d bytes",
		       (int)sizeof(struct stats));
		logger(L_DEBUG, L_FUNC, "timeout    : %d seconds",
		       table_timeout);
		logger(L_DEBUG, L_FUNC, "cache table: %d total bytes",
		       bytes);
		logger(L_DEBUG, L_FUNC, "cache table: %d slots",
		       table_size);
		logger(L_DEBUG, L_FUNC, "cache table: %d buckets",
		       table_size * CACHE_MAX_BUCKETS_PER);
	}

	/**************************************************************
	 * At the top of the region are the cache magic string and the
	 * stats struct. The slots follow. Due to locking, the counters
	 * in the stats struct will not be entirely accurate.
	 **************************************************************/
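
	/*
	 * Layout of the region, as established by the assignments below
	 * (offsets are from the base of the mmapped segment):
	 *
	 *   base +   0 : 64-byte cache magic string
	 *   base +  64 : struct stats (128 bytes are reserved for it)
	 *   base + 192 : bucket array, table_size * CACHE_MAX_BUCKETS_PER
	 *                buckets
	 */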

	memset(base, 0, bytes);

	memcpy(base, cache_magic, 64);
	table_stats = (void *)((char *)base + 64);
	table_stats->table_size = table_size;
	table_stats->max_buckets_per = CACHE_MAX_BUCKETS_PER;
	table_stats->sizeof_bucket = (unsigned int)sizeof(struct bucket);
	table_stats->timeout = table_timeout;
	table_stats->bytes = bytes;

	table = (void *)((char *)table_stats + 128);

	/**************************************************************
	 * Last, initialize the hash table locking.
	 **************************************************************/

	if (cache_init_lock() != 0)
		return -1;

	return 0;
}

/*************************************************************
 * Here we'll take some credentials and run them through
 * the hash table. If we have a valid hit, all is good and we
 * return CACHE_OK. If we don't get a hit, write the entry to
 * the result pointer and expect a later call to
 * cache_commit() to flush the bucket into the table.
 **************************************************************/
int cache_lookup(const char *user, const char *realm, const char *service,
		 const char *password, struct cache_result *result) {

	int			user_length = 0;
	int			realm_length = 0;
	int			service_length = 0;
	int			hash_offset;
	unsigned char		pwd_digest[16];
	MD5_CTX			md5_context;
	time_t			epoch;
	time_t			epoch_timeout;
	struct bucket		*ref_bucket;
	struct bucket		*low_bucket;
	struct bucket		*high_bucket;
	struct bucket		*read_bucket = NULL;
	char			userrealmserv[CACHE_MAX_CREDS_LENGTH];
	static const char	*debug = "[login=%s] [realm=%s] [service=%s]: %s";


	if (!(flags & CACHE_ENABLED))
		return CACHE_FAIL;

	memset((void *)result, 0, sizeof(struct cache_result));
	result->status = CACHE_NO_FLUSH;

	/**************************************************************
	 * Initial length checks
	 **************************************************************/

	user_length = (int)strlen(user) + 1;
	realm_length = (int)strlen(realm) + 1;
	service_length = (int)strlen(service) + 1;

	if ((user_length + realm_length + service_length) > CACHE_MAX_CREDS_LENGTH) {
		return CACHE_TOO_BIG;
	}

	/**************************************************************
	 * Any ideas on how not to call time() for every lookup?
	 **************************************************************/

	epoch = time(NULL);
	epoch_timeout = epoch - table_timeout;

	/**************************************************************
	 * Get the offset into the hash table and the md5 sum of
	 * the password.
	 **************************************************************/

	strlcpy(userrealmserv, user, sizeof(userrealmserv));
	strlcat(userrealmserv, realm, sizeof(userrealmserv));
	strlcat(userrealmserv, service, sizeof(userrealmserv));

	hash_offset = cache_pjwhash(userrealmserv);

	_saslauthd_MD5Init(&md5_context);
	_saslauthd_MD5Update(&md5_context, password, strlen(password));
	_saslauthd_MD5Final(pwd_digest, &md5_context);
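
	/*
	 * Note: user, realm, and service are concatenated with no separator
	 * purely to pick a slot; any collisions that causes are harmless
	 * because each field is compared individually below. Only the
	 * 16-byte MD5 digest of the password is ever stored in a bucket,
	 * never the cleartext password itself.
	 */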

	/**************************************************************
	 * Loop through the bucket chain to try and find a hit.
	 *
	 * low_bucket = bucket at the start of the slot.
	 *
	 * high_bucket = one past the last bucket in the slot.
	 *
	 * read_bucket = Contains the matched bucket if found.
	 *               Otherwise is NULL.
	 *
	 * Also, lock the slot first to avoid contention in the
	 * bucket chain.
	 *
	 **************************************************************/

	table_stats->attempts++;

	if (cache_get_rlock(hash_offset) != 0) {
		table_stats->misses++;
		table_stats->lock_failures++;
		return CACHE_FAIL;
	}

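	/*
	 * Slot hash_offset owns the CACHE_MAX_BUCKETS_PER consecutive
	 * buckets starting at table[hash_offset * CACHE_MAX_BUCKETS_PER];
	 * low_bucket and high_bucket simply bracket that range.
	 */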
	low_bucket = table + (CACHE_MAX_BUCKETS_PER * hash_offset);
	high_bucket = low_bucket + CACHE_MAX_BUCKETS_PER;

	for (ref_bucket = low_bucket; ref_bucket < high_bucket; ref_bucket++) {
		if (strcmp(user, ref_bucket->creds + ref_bucket->user_offt) == 0 &&
		    strcmp(realm, ref_bucket->creds + ref_bucket->realm_offt) == 0 &&
		    strcmp(service, ref_bucket->creds + ref_bucket->service_offt) == 0) {
			read_bucket = ref_bucket;
			break;
		}
	}

	/**************************************************************
	 * If we have our fish, check the password. If it's good,
	 * release the slot (row) lock and return CACHE_OK. Else,
	 * we'll write the entry to the result pointer. If we have a
	 * read_bucket, then tell cache_commit() to not rescan the
	 * chain (CACHE_FLUSH). Else, have cache_commit() determine the
	 * best bucket in which to place the new entry
	 * (CACHE_FLUSH_WITH_RESCAN).
	 **************************************************************/

	if (read_bucket != NULL && read_bucket->created > epoch_timeout) {

		if (memcmp(pwd_digest, read_bucket->pwd_digest, 16) == 0) {

			if (flags & VERBOSE)
				logger(L_DEBUG, L_FUNC, debug, user, realm, service, "found with valid passwd");

			cache_un_lock(hash_offset);
			table_stats->hits++;
			return CACHE_OK;
		}

		if (flags & VERBOSE)
			logger(L_DEBUG, L_FUNC, debug, user, realm, service, "found with invalid passwd, update pending");

		result->status = CACHE_FLUSH;

	} else {

		if (flags & VERBOSE)
			logger(L_DEBUG, L_FUNC, debug, user, realm, service, "not found, update pending");

		result->status = CACHE_FLUSH_WITH_RESCAN;
	}

	result->hash_offset = hash_offset;
	result->read_bucket = read_bucket;

	result->bucket.user_offt = 0;
	result->bucket.realm_offt = user_length;
	result->bucket.service_offt = user_length + realm_length;

	strcpy(result->bucket.creds + result->bucket.user_offt, user);
	strcpy(result->bucket.creds + result->bucket.realm_offt, realm);
	strcpy(result->bucket.creds + result->bucket.service_offt, service);

	memcpy(result->bucket.pwd_digest, pwd_digest, 16);
	result->bucket.created = epoch;

	cache_un_lock(hash_offset);
	table_stats->misses++;
	return CACHE_FAIL;
}


/*************************************************************
 * If it is later determined that the previously failed lookup
 * is ok, flush result->bucket out to its permanent home
 * in the hash table.
 **************************************************************/
void cache_commit(struct cache_result *result) {
	struct bucket           *write_bucket;
	struct bucket		*ref_bucket;
	struct bucket		*low_bucket;
	struct bucket		*high_bucket;

	if (!(flags & CACHE_ENABLED))
		return;

	if (result->status == CACHE_NO_FLUSH)
		return;

	if (cache_get_wlock(result->hash_offset) != 0) {
		table_stats->lock_failures++;
		return;
	}

	if (result->status == CACHE_FLUSH) {
		write_bucket = result->read_bucket;
	} else {
		/*********************************************************
		 * CACHE_FLUSH_WITH_RESCAN is the default action to take.
		 * Simply traverse the slot looking for the oldest bucket
		 * and mark it for writing.
		 **********************************************************/
		low_bucket = table + (CACHE_MAX_BUCKETS_PER * result->hash_offset);
		high_bucket = low_bucket + CACHE_MAX_BUCKETS_PER;
		write_bucket = low_bucket;

		for (ref_bucket = low_bucket; ref_bucket < high_bucket; ref_bucket++) {
			if (ref_bucket->created < write_bucket->created)
				write_bucket = ref_bucket;
		}
	}

	memcpy((void *)write_bucket, (void *)&(result->bucket), sizeof(struct bucket));

	if (flags & VERBOSE)
		logger(L_DEBUG, L_FUNC, "lookup committed");

	cache_un_lock(result->hash_offset);
	return;
}


/*************************************************************
 * Hashing function. The algorithm is an adaptation of Peter
 * Weinberger's (PJW) generic hashing algorithm, which
 * is based on Allen Holub's version.
 **************************************************************/
int cache_pjwhash(char *datum) {
    const int BITS_IN_int = ( (int)sizeof(int) * CHAR_BIT );
    const int THREE_QUARTERS = ((int) ((BITS_IN_int * 3) / 4));
    const int ONE_EIGHTH = ((int) (BITS_IN_int / 8));
    const int HIGH_BITS = ( ~((unsigned int)(~0) >> ONE_EIGHTH ));
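
    /*
     * On a machine with 32-bit ints these work out to BITS_IN_int = 32,
     * ONE_EIGHTH = 4, THREE_QUARTERS = 24, and HIGH_BITS = the top four
     * bits of an unsigned int, which is the classic PJW parameterization.
     */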

    unsigned int            hash_value, i;

    for (hash_value = 0; *datum; ++datum) {
	hash_value = (hash_value << ONE_EIGHTH) + *datum;
	if ((i = hash_value & HIGH_BITS) != 0)
	    hash_value = (hash_value ^ (i >> THREE_QUARTERS)) & ~HIGH_BITS;
    }

    return (hash_value % table_size);
}

/*************************************************************
 * Allow someone to set the hash table size (in kilobytes).
 * Since the number of slots has to be prime, the resulting
 * size won't be exact.
 **************************************************************/
void cache_set_table_size(const char *size) {
	unsigned int	kilobytes;
	unsigned int	bytes;
	unsigned int	calc_bytes = 0;
	unsigned int	calc_table_size = 1;
	long		kb;

	kb = strtol(size, (char **)NULL, 10);

	if (kb <= 0) {
		logger(L_ERR, L_FUNC,
		       "cache size must be positive and non zero");
		exit(1);
	}

	kilobytes = (unsigned int)kb;
	bytes = kilobytes * 1024;

	calc_table_size =
	    bytes / ((unsigned int)sizeof(struct bucket) * CACHE_MAX_BUCKETS_PER);

	do {
	    calc_table_size = cache_get_next_prime(calc_table_size);
	    calc_bytes = calc_table_size *
            (unsigned int)sizeof(struct bucket) * CACHE_MAX_BUCKETS_PER;
	} while (calc_bytes < bytes);
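
	/*
	 * Example (hypothetical numbers): if a bucket were 128 bytes and
	 * CACHE_MAX_BUCKETS_PER were 6, a request for 1024 KB would start
	 * at 1048576 / 768 = 1365 slots and be bumped to the next prime,
	 * 1367; since 1367 * 768 = 1049856 >= 1048576 the loop stops and
	 * table_size becomes 1367.
	 */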

	table_size = calc_table_size;

	return;
}


/*************************************************************
 * Allow someone to set the table timeout (in seconds)
 **************************************************************/
void cache_set_timeout(const char *time) {
	long	timeout;

	timeout = strtol(time, (char **)NULL, 10);

	if (timeout <= 0) {
		logger(L_ERR, L_FUNC, "cache timeout must be positive");
		exit(1);
	}

	table_timeout = (unsigned int)timeout;

	return;
}


/*************************************************************
 * Find the next prime greater than the number given.
 * This is a variation of an implementation of the
 * Sieve of Eratosthenes by Frank Pilhofer,
 * http://www.fpx.de/fp/Software/Sieve.html.
 **************************************************************/
unsigned int cache_get_next_prime(unsigned int number) {

#define TEST(f,x)	(*(f+((x)>>4))&(1<<(((x)&15U)>>1)))
#define SET(f,x)        *(f+((x)>>4))|=1<<(((x)&15)>>1)
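
	/*
	 * TEST/SET treat feld as a bitmap over the odd integers: each byte
	 * covers a span of 16 consecutive numbers, with one bit per odd
	 * value ((x & 15) >> 1 picks the bit, x >> 4 picks the byte). Even
	 * numbers are never tested. The bitmap allocated below spans up to
	 * roughly number + 10000, which bounds how far the search can go.
	 */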

	unsigned char	*feld = NULL;
	unsigned int	teste = 1;
	unsigned int	max;
	unsigned int	mom;
	unsigned int	alloc;

	max = number + 20000;

	feld = malloc(alloc=(((max-=10000)>>4)+1));

	if (feld == NULL) {
		logger(L_ERR, L_FUNC, "could not allocate memory");
		exit(1);
	}

	memset(feld, 0, alloc);

	while ((teste += 2) < max) {
		if (!TEST(feld, teste)) {
			if (teste > number) {
				free(feld);
				return teste;
			}

			for (mom=3*teste; mom<max; mom+=teste<<1) SET (feld, mom);
		}
	}

	/******************************************************
	 * A prime wasn't found in the maximum search range.
	 * Just return the original number.
	 ******************************************************/

	free(feld);
	return number;
}


/*************************************************************
 * Open the file that we'll mmap in as the shared memory
 * segment. If something fails, return NULL.
 **************************************************************/
void *cache_alloc_mm(unsigned int bytes) {
	int		file_fd;
	int		rc;
	int		chunk_count;
	char		null_buff[1024];
	size_t          mm_file_len;

	mm.bytes = bytes;

	mm_file_len = strlen(run_path) + sizeof(CACHE_MMAP_FILE) + 1;
	if (!(mm.file =
	     (char *)malloc(mm_file_len))) {
		logger(L_ERR, L_FUNC, "could not allocate memory");
		return NULL;
	}

	strlcpy(mm.file, run_path, mm_file_len);
	strlcat(mm.file, CACHE_MMAP_FILE, mm_file_len);

	if ((file_fd =
	     open(mm.file, O_RDWR|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR)) < 0) {
		rc = errno;
		logger(L_ERR, L_FUNC, "could not open mmap file: %s", mm.file);
		logger(L_ERR, L_FUNC, "open: %s", strerror(rc));
		return NULL;
	}

	memset(null_buff, 0, sizeof(null_buff));

	chunk_count = (bytes / (int)sizeof(null_buff)) + 1;

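	/*
	 * The backing file has to be extended to at least `bytes` before it
	 * can be mmapped and used; writing zeroed 1 KB chunks does that and
	 * leaves the table cleanly initialized. Touching pages of a shared
	 * mapping that lie beyond the end of the file would raise SIGBUS.
	 */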
	while (chunk_count > 0) {
	    if (tx_rec(file_fd, null_buff, sizeof(null_buff))
		!= (ssize_t)sizeof(null_buff)) {
		rc = errno;
		logger(L_ERR, L_FUNC,
		       "failed while writing to mmap file: %s",
		       mm.file);
		close(file_fd);
		return NULL;
	    }

	    chunk_count--;
	}

	if ((mm.base = mmap(NULL, bytes, PROT_READ|PROT_WRITE,
			    MAP_SHARED, file_fd, 0)) == MAP_FAILED) {
		rc = errno;
		logger(L_ERR, L_FUNC, "could not mmap shared memory segment");
		logger(L_ERR, L_FUNC, "mmap: %s", strerror(rc));
		close(file_fd);
		return NULL;
	}

	close(file_fd);

	if (flags & VERBOSE) {
		logger(L_DEBUG, L_FUNC,
		       "mmapped shared memory segment on file: %s", mm.file);
	}

	return mm.base;
}


/*************************************************************
 * When we die we may need to perform some cleanup on the
 * mmapped region. We assume we're the last process out here.
 * Otherwise, deleting the file may cause SIGBUS signals to
 * be generated for other processes.
 **************************************************************/
void cache_cleanup_mm(void) {
	if (mm.base != NULL) {
		munmap(mm.base, mm.bytes);
		unlink(mm.file);

		if (flags & VERBOSE) {
			logger(L_DEBUG, L_FUNC,
			       "cache mmap file removed: %s", mm.file);
		}
	}

	return;
}

/*****************************************************************
 * The following applies to the fcntl() locking method, typically
 * used when the System V SHM implementation is in effect.
 ****************************************************************/
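
/*
 * Each hash table slot maps to a single byte offset in a dedicated lock
 * file (l_start = slot, l_len = 1 below), so fcntl() byte-range locks on
 * that byte act as per-slot read/write locks shared across processes.
 */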
#ifdef CACHE_USE_FCNTL

/*************************************************************
 * Set up the locking state required to implement fcntl()
 * style record locking of the hash table. Return 0 if
 * everything is peachy, otherwise -1.
 * __FCNTL Impl__
 **************************************************************/
int cache_init_lock(void) {
	int	rc;
	size_t  flock_file_len;

	flock_file_len = strlen(run_path) + sizeof(CACHE_FLOCK_FILE) + 1;
	if ((lock.flock_file = (char *)malloc(flock_file_len)) == NULL) {
		logger(L_ERR, L_FUNC, "could not allocate memory");
		return -1;
	}

	strlcpy(lock.flock_file, run_path, flock_file_len);
	strlcat(lock.flock_file, CACHE_FLOCK_FILE, flock_file_len);

	if ((lock.flock_fd = open(lock.flock_file, O_RDWR|O_CREAT|O_TRUNC, S_IWUSR|S_IRUSR)) == -1) {
		rc = errno;
		logger(L_ERR, L_FUNC, "could not open flock file: %s", lock.flock_file);
		logger(L_ERR, L_FUNC, "open: %s", strerror(rc));
		return -1;
	}

	if (flags & VERBOSE)
		logger(L_DEBUG, L_FUNC, "flock file opened at %s", lock.flock_file);

	return 0;
}


/*************************************************************
 * When the process dies we'll need to clean up/delete
 * the flock file. More for correctness than anything.
 * __FCNTL Impl__
 **************************************************************/
void cache_cleanup_lock(void) {


	if (lock.flock_file != NULL) {
		unlink(lock.flock_file);

		if (flags & VERBOSE)
			logger(L_DEBUG, L_FUNC, "flock file removed: %s", lock.flock_file);

	}

	return;
}


/*************************************************************
 * Attempt to get a write lock on a slot. Return 0 if
 * everything went ok, return -1 if something bad happened.
 * This function is expected to block.
 * __FCNTL Impl__
 **************************************************************/
int cache_get_wlock(unsigned int slot) {
	struct flock	lock_st;
	int		rc;

	lock_st.l_type = F_WRLCK;
	lock_st.l_start = slot;
	lock_st.l_whence = SEEK_SET;
	lock_st.l_len = 1;

	errno = 0;

	do {
		if (flags & VERBOSE)
			logger(L_DEBUG, L_FUNC, "attempting a write lock on slot: %d", slot);

		rc = fcntl(lock.flock_fd, F_SETLKW, &lock_st);
	} while (rc != 0 && errno == EINTR);

	if (rc != 0) {
		rc = errno;
		logger(L_ERR, L_FUNC, "could not acquire a write lock on slot: %d", slot);
		logger(L_ERR, L_FUNC, "fcntl: %s", strerror(rc));
		return -1;
	}

	return 0;
}


/*************************************************************
 * Attempt to get a read lock on a slot. Return 0 if
 * everything went ok, return -1 if something bad happened.
 * This function is expected to block.
 * __FCNTL Impl__
 **************************************************************/
int cache_get_rlock(unsigned int slot) {

	struct flock	lock_st;
	int		rc;


	lock_st.l_type = F_RDLCK;
	lock_st.l_start = slot;
	lock_st.l_whence = SEEK_SET;
	lock_st.l_len = 1;

	errno = 0;

	do {
		if (flags & VERBOSE)
			logger(L_DEBUG, L_FUNC, "attempting a read lock on slot: %d", slot);

		rc = fcntl(lock.flock_fd, F_SETLKW, &lock_st);
	} while (rc != 0 && errno == EINTR);

	if (rc != 0) {
		rc = errno;
		logger(L_ERR, L_FUNC, "could not acquire a read lock on slot: %d", slot);
		logger(L_ERR, L_FUNC, "fcntl: %s", strerror(rc));
		return -1;
	}

	return 0;
}


/*************************************************************
 * Releases a previously acquired lock on a slot.
 * __FCNTL Impl__
 **************************************************************/
int cache_un_lock(unsigned int slot) {

	struct flock	lock_st;
	int		rc;


	lock_st.l_type = F_UNLCK;
	lock_st.l_start = slot;
	lock_st.l_whence = SEEK_SET;
	lock_st.l_len = 1;

	errno = 0;

	do {
		if (flags & VERBOSE)
			logger(L_DEBUG, L_FUNC, "attempting to release lock on slot: %d", slot);

		rc = fcntl(lock.flock_fd, F_SETLKW, &lock_st);
	} while (rc != 0 && errno == EINTR);

	if (rc != 0) {
		rc = errno;
		logger(L_ERR, L_FUNC, "could not release lock on slot: %d", slot);
		logger(L_ERR, L_FUNC, "fcntl: %s", strerror(rc));
		return -1;
	}

	return 0;
}


#endif  /* CACHE_USE_FCNTL */

/**********************************************************************
 * The following applies to the POSIX threads rwlock method of locking
 * slots in the hash table, used when the Doors IPC method is in effect
 * and we are therefore already linking against -lpthread.
 ***********************************************************************/

#ifdef CACHE_USE_PTHREAD_RWLOCK
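
/*
 * One pthread_rwlock_t per hash table slot: lock.rwlock is an array of
 * table_size locks, and the slot number is used directly as the index in
 * cache_get_rlock(), cache_get_wlock(), and cache_un_lock() below.
 */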

/*************************************************************
 * Initialize a pthread_rwlock_t for every slot (row) in the
 * hash table. Return 0 if everything went ok, -1 if we bomb.
 * __RWLock Impl__
 **************************************************************/
int cache_init_lock(void) {
	unsigned int		x;
	pthread_rwlock_t	*rwlock;

	if (!(lock.rwlock =
	     (pthread_rwlock_t *)malloc(sizeof(pthread_rwlock_t) * table_size))) {
		logger(L_ERR, L_FUNC, "could not allocate memory");
		return -1;
	}

	for (x = 0; x < table_size; x++) {
		rwlock = lock.rwlock + x;

		if (pthread_rwlock_init(rwlock, NULL) != 0) {
			logger(L_ERR, L_FUNC, "failed to initialize lock %d", x);
			return -1;
		}
	}

	if (flags & VERBOSE)
		logger(L_DEBUG, L_FUNC, "%d rwlocks initialized", table_size);

	return 0;
}


/*************************************************************
 * Destroy all of the rwlocks, free the buffer.
 * __RWLock Impl__
 **************************************************************/
void cache_cleanup_lock(void) {
    unsigned int x;
    pthread_rwlock_t	*rwlock;

    if (!lock.rwlock) return;

    for (x = 0; x < table_size; x++) {
	rwlock = lock.rwlock + x;
	pthread_rwlock_destroy(rwlock);
    }

    free(lock.rwlock);

    return;
}


/*************************************************************
 * Attempt to get a write lock on a slot. Return 0 if
 * everything went ok, return -1 if something bad happened.
 * This function is expected to block the current thread.
 * __RWLock Impl__
 **************************************************************/
int cache_get_wlock(unsigned int slot) {

	int		rc = 0;


	if (flags & VERBOSE)
		logger(L_DEBUG, L_FUNC, "attempting a write lock on slot: %d", slot);

	rc = pthread_rwlock_wrlock(lock.rwlock + slot);

	if (rc != 0) {
		logger(L_ERR, L_FUNC, "could not acquire a write lock on slot: %d", slot);
		return -1;
	}

	return 0;
}


/*************************************************************
 * Attempt to get a read lock on a slot. Return 0 if
 * everything went ok, return -1 if something bad happened.
 * This function is expected to block the current thread.
 * __RWLock Impl__
 **************************************************************/
int cache_get_rlock(unsigned int slot) {

	int		rc = 0;


	if (flags & VERBOSE)
		logger(L_DEBUG, L_FUNC, "attempting a read lock on slot: %d", slot);

	rc = pthread_rwlock_rdlock(lock.rwlock + slot);

	if (rc != 0) {
		logger(L_ERR, L_FUNC, "could not acquire a read lock on slot: %d", slot);
		return -1;
	}

	return 0;
}


/*************************************************************
 * Releases a previously acquired lock on a slot.
 * __RWLock Impl__
 **************************************************************/
int cache_un_lock(unsigned int slot) {

	int		rc = 0;


	if (flags & VERBOSE)
		logger(L_DEBUG, L_FUNC, "attempting to release lock on slot: %d", slot);

	rc = pthread_rwlock_unlock(lock.rwlock + slot);

	if (rc != 0) {
		logger(L_ERR, L_FUNC, "could not release lock on slot: %d", slot);
		return -1;
	}

	return 0;
}


#endif  /* CACHE_USE_PTHREAD_RWLOCK */
/***************************************************************************************/
/***************************************************************************************/
