/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Portions Copyright 2007-2011 Apple Inc.
 */

/*
 *	autod_readdir.c
 */

#pragma ident	"@(#)autod_readdir.c	1.23	05/06/08 SMI"

#include <stdio.h>
#include <ctype.h>
#include <string.h>
#include <syslog.h>
#include <sys/types.h>
#include <sys/param.h>
#include <errno.h>
#include <pwd.h>
#include <locale.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <fcntl.h>
#include "automount.h"
#include "automountd.h"

static void build_dir_entry_list(struct rddir_cache *rdcp,
				struct dir_entry *list);
static void build_subdir_entry_list(struct dir_entry *list, ino_t start_inonum);
static int rddir_cache_enter(const char *map, uint_t bucket_size,
				struct rddir_cache **rdcpp);
static int rddir_cache_lookup(const char *map, struct rddir_cache **rdcpp);
static int rddir_cache_delete(struct rddir_cache *rdcp);
static struct dir_entry *scan_cache_entry_for_bucket(struct rddir_cache *rdcp,
				off_t offset);
static int create_dirents(struct dir_entry *list, off_t offset,
				uint32_t rda_count,
				off_t *rddir_offset,
				boolean_t *rddir_eof,
				byte_buffer *rddir_entries,
				mach_msg_type_number_t *rddir_entriesCnt);
static void free_offset_tbl(struct off_tbl *head);
static void free_dir_list(struct dir_entry *head);

#define	OFFSET_BUCKET_SIZE	100

pthread_rwlock_t rddir_cache_lock;		/* readdir cache lock */
struct rddir_cache *rddir_head;		/* readdir cache head */

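/*
 * Handle a readdir request for the top-level directory of the map
 * 'rda_map': return, in a vm_allocate()d buffer of at most 'rda_count'
 * bytes, the directory entries starting at 'rda_offset', along with the
 * offset at which to resume and an EOF indication.  Entries are served
 * from the readdir cache, which is (re)populated from the map's name
 * service if needed; if browsing is disabled, an empty listing with EOF
 * is returned.
 */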
int
do_readdir(autofs_pathname rda_map, off_t rda_offset,
    uint32_t rda_count, off_t *rddir_offset,
    boolean_t *rddir_eof, byte_buffer *rddir_entries,
    mach_msg_type_number_t *rddir_entriesCnt)
{
	struct dir_entry *list = NULL, *l, *bucket;
	struct rddir_cache *rdcp = NULL;
	int error;
	int cache_time = RDDIR_CACHE_TIME;

	if (automountd_nobrowse) {
		/*
		 * Browsability was disabled; return an empty list.
		 */
		*rddir_entriesCnt = 0;
		*rddir_eof = TRUE;
		*rddir_entries = NULL;

		return (0);
	}

	pthread_rwlock_rdlock(&rddir_cache_lock);
	error = rddir_cache_lookup(rda_map, &rdcp);
	if (error) {
		pthread_rwlock_unlock(&rddir_cache_lock);
		pthread_rwlock_wrlock(&rddir_cache_lock);
		error = rddir_cache_lookup(rda_map, &rdcp);
		if (error) {
			if (trace > 2)
				trace_prt(1,
				"map %s not found, adding...\n", rda_map);
			/*
			 * Entry doesn't exist; add it.
			 */
			error = rddir_cache_enter(rda_map,
					OFFSET_BUCKET_SIZE, &rdcp);
		}
	}
	pthread_rwlock_unlock(&rddir_cache_lock);

	if (error)
		return (error);

	assert(rdcp != NULL);
	assert(rdcp->in_use);

	if (!rdcp->full) {
		pthread_rwlock_wrlock(&rdcp->rwlock);
		if (!rdcp->full) {
			/*
			 * Cache entry hasn't been filled yet; do it now.
			 */
			char *stack[STACKSIZ];
			char **stkptr;

			/*
			 * Initialize the stack of open files
			 * for this thread.
			 */
			stack_op(INIT, NULL, stack, &stkptr);
			(void) getmapkeys(rda_map, &list, &error,
			    &cache_time, stack, &stkptr);
			if (!error)
				build_dir_entry_list(rdcp, list);
			else if (list) {
				free_dir_list(list);
				list = NULL;
			}
		}
	} else
		pthread_rwlock_rdlock(&rdcp->rwlock);

	if (!error) {
		bucket = scan_cache_entry_for_bucket(rdcp, rda_offset);
		error = create_dirents(bucket, rda_offset, rda_count,
		    rddir_offset, rddir_eof, rddir_entries,
		    rddir_entriesCnt);
		if (error) {
			if (rdcp->offtp) {
				free_offset_tbl(rdcp->offtp);
				rdcp->offtp = NULL;
			}
			if (rdcp->entp) {
				free_dir_list(rdcp->entp);
				rdcp->entp = NULL;
			}
			rdcp->full = 0;
			list = NULL;
		}
	}

	if (trace > 2) {
		/*
		 * print this list only once
		 */
		for (l = list; l != NULL; l = l->next)
			trace_prt(0, "%s\n", l->name);
		trace_prt(0, "\n");
	}

	if (!error) {
		if (cache_time) {
			/*
			 * keep list of entries for up to
			 * 'cache_time' seconds
			 */
			rdcp->ttl = time((time_t *)NULL) + cache_time;
		} else {
			/*
			 * the underlying name service indicated not
			 * to cache contents.
			 */
			if (rdcp->offtp) {
				free_offset_tbl(rdcp->offtp);
				rdcp->offtp = NULL;
			}
			if (rdcp->entp) {
				free_dir_list(rdcp->entp);
				rdcp->entp = NULL;
			}
			rdcp->full = 0;
		}
	} else {
		/*
		 * return an empty list
		 */
		*rddir_entriesCnt = 0;
		*rddir_eof = TRUE;
		*rddir_entries = NULL;
	}
	pthread_rwlock_unlock(&rdcp->rwlock);

	pthread_mutex_lock(&rdcp->lock);
	rdcp->in_use--;
	pthread_mutex_unlock(&rdcp->lock);

	assert(rdcp->in_use >= 0);

	return (error);
}

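/*
 * Handle a readdir request for a directory below the top level of the
 * indirect map 'rda_map': parse the map entry for 'key' and return its
 * mount points as directory entries, regardless of whether browsing is
 * enabled.  The inode numbers handed out are derived from 'rda_dirino',
 * the inode number of the directory being read.
 */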
int
do_readsubdir(autofs_pathname rda_map, char *key,
    autofs_pathname rda_subdir, autofs_opts mapopts, uint32_t rda_dirino,
    off_t rda_offset, uint32_t rda_count, off_t *rddir_offset,
    boolean_t *rddir_eof, byte_buffer *rddir_entries,
    mach_msg_type_number_t *rddir_entriesCnt)
{
	struct mapent *me, *mapents;
	int err;
	bool_t isrestricted = hasrestrictopt(mapopts);
	char *p;
	struct dir_entry *list = NULL;
	struct dir_entry *last = NULL;

	/*
	 * We're not reading the top-level directory of an indirect
	 * map; we're reading a directory underneath that.  We must
	 * always show that, even if we've globally turned off
	 * browsability.
	 *
	 * First, look up the map entry for the directory immediately
	 * below the top-level directory.
	 */

	/*
	 * Call the parser with default mount_access = TRUE.
	 */
	mapents = parse_entry(key, rda_map, mapopts, rda_subdir, FALSE,
		NULL, isrestricted, TRUE, &err);
	if (mapents == NULL) {
		/* Return the error parse_entry handed back. */
		return (err);
	}
	for (me = mapents; me; me = me->map_next) {
		p = me->map_mntpnt;
		if (p == NULL) {
			syslog(LOG_ERR, "null mountpoint in entry in %s",
			    me->map_root ? me->map_root : "<NULL>");
			continue;
		}
		while (*p == '/')
			p++;
		err = add_dir_entry(p, NULL, NULL, &list, &last);
		if (err != -1) {
			if (err != 0) {
				/*
				 * Free up list.
				 */
				if (list)
					free_dir_list(list);

				/*
				 * Free the map entries.
				 */
				free_mapent(mapents);

				/*
				 * return an empty list
				 */
				*rddir_entriesCnt = 0;
				*rddir_eof = TRUE;
				*rddir_entries = NULL;
				return (err);
			}
		}
	}

	if (mapents)
		free_mapent(mapents);

	/*
	 * We base the inode numbers in the subdirectory on the inode
	 * number of the directory we're reading, so that:
	 *
	 *	1) they don't match the inode numbers in other
	 *	   subdirectories, or in the top-level directory;
	 *
	 *	2) they're likely to remain the same from readdir
	 *	   to readdir.
	 *
	 * We swap the two halves of the directory inode number, so
	 * that the part we change is the slowly-changing part of
	 * the inode number handed out by autofs.  automountd hands
	 * out even inode numbers, and autofs hands out odd inode
	 * numbers, so, if the low-order bit of the result of the
	 * swap is 1, we clear it and the (known to be 1) low-order
	 * bit of the upper 16 bits.  If the upper 16 bits are 0, we set
	 * them to 0xffff, so that we never hand out "small" (<65536)
	 * inode numbers, which might collide with the inode numbers
	 * handed out for top-level directories.
	 */
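	/*
	 * For example (illustrative values only): a directory inode
	 * number of 0x00010005 becomes 0x00050001 after the swap; its
	 * low-order bit is 1, so bits 0 and 16 are cleared, leaving
	 * 0x00040000 as the first inode number handed out below.
	 */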
	rda_dirino = ((rda_dirino >> 16) & 0x0000FFFF) |
		     ((rda_dirino << 16) & 0xFFFF0000);
	if (rda_dirino & 0x00000001)
		rda_dirino &= ~0x00010001;

	build_subdir_entry_list(list, rda_dirino);

	err = create_dirents(list, rda_offset, rda_count,
	    rddir_offset, rddir_eof, rddir_entries,
	    rddir_entriesCnt);

	if (err) {
		/*
		 * return an empty list
		 */
		*rddir_entriesCnt = 0;
		*rddir_eof = TRUE;
		*rddir_entries = NULL;
	}

	return (err);
}

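/*
 * Find the bucket in the cache entry's offset table that covers
 * 'offset' and return the first directory entry in that bucket, or
 * NULL if the entry has no offset table.
 */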
static struct dir_entry *
scan_cache_entry_for_bucket(struct rddir_cache *rdcp, off_t offset)
{
	struct off_tbl *offtp, *next = NULL;
	int this_bucket = 0;
	struct dir_entry *list = NULL;
	int x = 0;

#if 0
	assert(RW_LOCK_HELD(&rdcp->rwlock));
#endif
	for (offtp = rdcp->offtp; offtp != NULL; offtp = next) {
		x++;
		next = offtp->next;
		this_bucket = (next == NULL);
		if (!this_bucket)
			this_bucket = (offset < next->offset);
		if (this_bucket) {
			/*
			 * has to be in this bucket
			 */
			assert(offset >= offtp->offset);
			list = offtp->first;
			break;
		}
		/*
		 * loop to look in next bucket
		 */
	}

	if (trace > 2)
		trace_prt(1, "%s: offset searches (%d)\n", rdcp->map, x);

	return (list);
}

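/*
 * Pack the entries in 'list' that lie at or beyond 'offset' into a
 * vm_allocate()d buffer of dirent_nonext records, stopping when the
 * next record would exceed 'rda_count' bytes.  On success the buffer,
 * its length, and the offset at which to resume the next readdir are
 * returned through the out parameters.
 */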
static int
create_dirents(struct dir_entry *list, off_t offset, uint32_t rda_count,
    off_t *rddir_offset, boolean_t *rddir_eof, byte_buffer *rddir_entries,
    mach_msg_type_number_t *rddir_entriesCnt)
{
	uint_t total_bytes_wanted;
	size_t bufsize;
	size_t this_reclen;
	uint_t outcount = 0;
	int namelen;
	struct dir_entry *l;
	kern_return_t ret;
	vm_address_t buffer_vm_address;
	struct dirent_nonext *dp;
	uint8_t *outbuf;
	int error = 0;
	int y = 0;

	for (l = list; l != NULL && l->offset < offset; l = l->next)
		y++;

	if (l == NULL) {
		/*
		 * reached end of directory
		 */
		error = 0;
		goto empty;
	}

	if (trace > 2)
		trace_prt(1, "offset searches (%d)\n", y);

	total_bytes_wanted = rda_count;
	bufsize = total_bytes_wanted + sizeof (struct dirent_nonext);
	ret = vm_allocate(current_task(), &buffer_vm_address,
	    bufsize, VM_FLAGS_ANYWHERE);
	if (ret != KERN_SUCCESS) {
		syslog(LOG_ERR, "memory allocation error: %s",
		    mach_error_string(ret));
		error = ENOMEM;
		goto empty;
	}
	outbuf = (uint8_t *)buffer_vm_address;
	memset(outbuf, 0, bufsize);
	/* LINTED pointer alignment */
	dp = (struct dirent_nonext *)outbuf;

	for (;;) {
		namelen = (int)strlen(l->name);
		this_reclen = DIRENT_RECLEN(namelen);
		if (outcount + this_reclen > total_bytes_wanted) {
			break;
		}

		/*
		 * XXX - 64-bit inumbers....
		 */
		dp->d_ino = (__uint32_t)l->nodeid;
		dp->d_reclen = this_reclen;
#if 0
		dp->d_type = DT_DIR;
#else
		dp->d_type = DT_UNKNOWN;
#endif
		dp->d_namlen = namelen;
		(void) strlcpy(dp->d_name, l->name, NAME_MAX);
		outcount += dp->d_reclen;
		dp = (struct dirent_nonext *)((char *)dp + dp->d_reclen);
		assert(outcount <= total_bytes_wanted);
		if (!l->next)
			break;
		l = l->next;
	}

	/*
	 * "l" is the last element; make offset one plus that entry's
	 * offset.
	 */
	*rddir_offset = l->offset + 1;

	if (outcount > 0) {
		/*
		 * have some entries
		 */
		*rddir_entriesCnt = outcount;
		*rddir_eof = (l == NULL);
		*rddir_entries = outbuf;
		error = 0;
	} else {
		/*
		 * total_bytes_wanted is not large enough for one
		 * directory entry
		 */
		*rddir_entriesCnt = 0;
		*rddir_eof = FALSE;
		*rddir_entries = NULL;
		vm_deallocate(current_task(), buffer_vm_address, bufsize);
		syslog(LOG_ERR,
			"byte count in readdir too small for one directory entry");
		error = EIO;
	}
	return (error);

empty:	*rddir_entriesCnt = 0;
	*rddir_eof = TRUE;
	*rddir_entries = NULL;
	return (error);
}


/*
 * add new entry to cache for 'map'
 */
static int
rddir_cache_enter(const char *map, uint_t bucket_size,
    struct rddir_cache **rdcpp)
{
	struct rddir_cache *p;
	int len;
#if 0
	assert(RW_LOCK_HELD(&rddir_cache_lock));
#endif

	/*
	 * Add to front of the list at this time
	 */
	p = (struct rddir_cache *)malloc(sizeof (*p));
	if (p == NULL) {
		syslog(LOG_ERR,
			"rddir_cache_enter: memory allocation failed\n");
		return (ENOMEM);
	}
	memset((char *)p, 0, sizeof (*p));

	len = (int) strlen(map) + 1;
	p->map = malloc(len);
	if (p->map == NULL) {
		syslog(LOG_ERR,
			"rddir_cache_enter: memory allocation failed\n");
		free(p);
		return (ENOMEM);
	}
	strlcpy(p->map, map, len);

	p->bucket_size = bucket_size;
	/*
	 * No need to grab the mutex lock since I haven't yet made the
	 * node visible on the list.
	 */
	p->in_use = 1;
	(void) pthread_rwlock_init(&p->rwlock, NULL);
	(void) pthread_mutex_init(&p->lock, NULL);

	if (rddir_head == NULL)
		rddir_head = p;
	else {
		p->next = rddir_head;
		rddir_head = p;
	}
	*rdcpp = p;

	return (0);
}
531
532/*
533 * find 'map' in readdir cache
534 */
535static int
536rddir_cache_lookup(const char *map, struct rddir_cache **rdcpp)
537{
538	struct rddir_cache *p;
539
540#if 0
541	assert(RW_LOCK_HELD(&rddir_cache_lock));
542#endif
543	for (p = rddir_head; p != NULL; p = p->next) {
544		if (strcmp(p->map, map) == 0) {
545			/*
546			 * found matching entry
547			 */
548			*rdcpp = p;
549			pthread_mutex_lock(&p->lock);
550			p->in_use++;
551			pthread_mutex_unlock(&p->lock);
552			return (0);
553		}
554	}
555	/*
556	 * didn't find entry
557	 */
558	return (ENOENT);
559}

/*
 * free the offset table
 */
static void
free_offset_tbl(struct off_tbl *head)
{
	struct off_tbl *p, *next = NULL;

	for (p = head; p != NULL; p = next) {
		next = p->next;
		free(p);
	}
}

/*
 * free the directory entries
 */
static void
free_dir_list(struct dir_entry *head)
{
	struct dir_entry *p, *next = NULL;

	for (p = head; p != NULL; p = next) {
		next = p->next;
		free(p->line);
		free(p->lineq);
		assert(p->name);
		free(p->name);
		free(p);
	}
}

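/*
 * Free an unreferenced readdir cache entry along with its map name,
 * offset table, and directory entry list.
 */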
static void
rddir_cache_entry_free(struct rddir_cache *p)
{
#if 0
	assert(RW_LOCK_HELD(&rddir_cache_lock));
#endif
	assert(!p->in_use);
	if (p->map)
		free(p->map);
	if (p->offtp)
		free_offset_tbl(p->offtp);
	if (p->entp)
		free_dir_list(p->entp);
	free(p);
}

/*
 * Remove an entry from the rddir cache.
 * The caller must own the rddir_cache_lock.
 */
static int
rddir_cache_delete(struct rddir_cache *rdcp)
{
	struct rddir_cache *p, *prev;

#if 0
	assert(RW_LOCK_HELD(&rddir_cache_lock));
#endif
	/*
	 * Search cache for entry
	 */
	prev = NULL;
	for (p = rddir_head; p != NULL; p = p->next) {
		if (p == rdcp) {
			/*
			 * entry found, remove from list if not in use
			 */
			if (p->in_use)
				return (EBUSY);
			if (prev)
				prev->next = p->next;
			else
				rddir_head = p->next;
			rddir_cache_entry_free(p);
			return (0);
		}
		prev = p;
	}
	syslog(LOG_ERR, "Couldn't find entry %p in cache\n", rdcp);
	return (ENOENT);
}

/*
 * Return entry in map that matches name, NULL otherwise.
 */
struct dir_entry *
rddir_entry_lookup(const char *mapname, const char *name)
{
	int err;
	struct rddir_cache *rdcp;
	struct dir_entry *p = NULL;

	pthread_rwlock_rdlock(&rddir_cache_lock);
	err = rddir_cache_lookup(mapname, &rdcp);
	if (!err && rdcp->full) {
		pthread_rwlock_unlock(&rddir_cache_lock);
		/*
		 * Try to lock the readdir cache entry for reading; if
		 * the entry cannot be locked, avoid blocking and
		 * return NULL; our caller will have to go to the
		 * name service to find the entry.  I'm assuming it is
		 * faster to go to the name service than to wait for
		 * the cache to be populated.
		 */
		if (pthread_rwlock_tryrdlock(&rdcp->rwlock) == 0) {
			p = btree_lookup(rdcp->entp, name);
			pthread_rwlock_unlock(&rdcp->rwlock);
		}
	} else
		pthread_rwlock_unlock(&rddir_cache_lock);

	if (!err) {
		/*
		 * Release the reference on the cache entry.
		 */
		pthread_mutex_lock(&rdcp->lock);
		rdcp->in_use--;
		assert(rdcp->in_use >= 0);
		pthread_mutex_unlock(&rdcp->lock);
	}

	return (p);
}

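/*
 * Attach 'list' to the cache entry, assign each entry a readdir cookie
 * (starting at AUTOFS_DAEMONCOOKIE) and an even inode number, build the
 * offset table used to locate a bucket quickly, and mark the entry full.
 */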
static void
build_dir_entry_list(struct rddir_cache *rdcp, struct dir_entry *list)
{
	struct dir_entry *p;
	off_t offset = AUTOFS_DAEMONCOOKIE, offset_list = AUTOFS_DAEMONCOOKIE;
	struct off_tbl *offtp, *last = NULL;
	ino_t inonum = 4;

#if 0
	assert(RW_LOCK_HELD(&rdcp->rwlock));
#endif
	assert(rdcp->entp == NULL);
	rdcp->entp = list;
	for (p = list; p != NULL; p = p->next) {
		p->nodeid = inonum;
		p->offset = offset;
		if (offset >= offset_list) {
			/*
			 * add node to index table
			 */
			offtp = (struct off_tbl *)
				malloc(sizeof (struct off_tbl));
			if (offtp != NULL) {
				offtp->offset = offset;
				offtp->first = p;
				offtp->next = NULL;
				offset_list += rdcp->bucket_size;
			} else {
				syslog(LOG_ERR,
"WARNING: build_dir_entry_list: could not add offset to index table\n");
				continue;
			}
			/*
			 * add to cache
			 */
			if (rdcp->offtp == NULL)
				rdcp->offtp = offtp;
			else
				last->next = offtp;
			last = offtp;
		}
		offset++;
		inonum += 2;		/* use even numbers in daemon */
	}
	rdcp->full = 1;
}

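/*
 * Assign readdir cookies and even inode numbers (starting at
 * 'start_inonum') to the entries of a subdirectory listing.
 */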
static void
build_subdir_entry_list(struct dir_entry *list, ino_t start_inonum)
{
	struct dir_entry *p;
	off_t offset = AUTOFS_DAEMONCOOKIE;
	ino_t inonum = start_inonum;

	for (p = list; p != NULL; p = p->next) {
		p->nodeid = inonum;
		p->offset = offset;
		offset++;
		inonum += 2;		/* use even numbers in daemon */
	}
}


pthread_mutex_t cleanup_lock;
pthread_cond_t cleanup_start_cv;
pthread_cond_t cleanup_done_cv;

/*
 * cache cleanup thread starting point
 */
void *
cache_cleanup(__unused void *unused)
{
	struct timespec abstime;
	struct rddir_cache *p, *next = NULL;
	int error;

	pthread_setname_np("cache cleanup");
	pthread_mutex_init(&cleanup_lock, NULL);
	pthread_cond_init(&cleanup_start_cv, NULL);
	pthread_cond_init(&cleanup_done_cv, NULL);

	pthread_mutex_lock(&cleanup_lock);
	for (;;) {
		/*
		 * Delay RDDIR_CACHE_TIME/2 seconds, or until some other
		 * thread requests that I clean up the caches.
		 */
		abstime.tv_sec = time(NULL) + RDDIR_CACHE_TIME/2;
		abstime.tv_nsec = 0;
		if ((error = pthread_cond_timedwait(
		    &cleanup_start_cv, &cleanup_lock, &abstime)) != 0) {
			if (error != ETIMEDOUT) {
				if (trace > 1)
					trace_prt(1,
					"cleanup thread wakeup (%d)\n", error);
				continue;
			}
		}
		pthread_mutex_unlock(&cleanup_lock);

		/*
		 * Perform the cache cleanup
		 */
		pthread_rwlock_wrlock(&rddir_cache_lock);
		for (p = rddir_head; p != NULL; p = next) {
			next = p->next;
			if (p->in_use > 0) {
				/*
				 * cache entry busy, skip it
				 */
				if (trace > 1) {
					trace_prt(1,
					"%s cache in use\n", p->map);
				}
				continue;
			}
			/*
			 * Cache entry is not in use, and nobody can grab a
			 * new reference since I'm holding the rddir_cache_lock.
			 */

			/*
			 * error will be zero if some thread signaled us asking
			 * that the caches be freed. In that case, free caches
			 * even if they're still valid and nobody is referencing
			 * them at this time. Otherwise, free caches only
			 * if their time to live (ttl) has expired.
			 */
			if (error == ETIMEDOUT && (p->ttl > time((time_t *)NULL))) {
				/*
				 * Scheduled cache cleanup; if the cache is
				 * still valid, don't free it.
				 */
				if (trace > 1) {
					trace_prt(1,
					"%s cache still valid\n", p->map);
				}
				continue;
			}
			if (trace > 1)
				trace_prt(1, "%s freeing cache\n", p->map);
			assert(!p->in_use);
			error = rddir_cache_delete(p);
			assert(!error);
		}
		pthread_rwlock_unlock(&rddir_cache_lock);

		/*
		 * Clean up the fstab cache.
		 */
		clean_fstab_cache(error == ETIMEDOUT);

		/*
		 * Wake up the thread(s) waiting for the
		 * cleanup to finish.
		 */
		pthread_mutex_lock(&cleanup_lock);
		pthread_cond_broadcast(&cleanup_done_cv);
	}
	/* NOTREACHED */
	return NULL;
}