nscd.c revision 158115
/*-
 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/usr.sbin/nscd/nscd.c 158115 2006-04-28 12:03:38Z ume $");

#include <sys/types.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/param.h>
#include <sys/un.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "agents/passwd.h"
#include "agents/group.h"
#include "agents/services.h"
#include "cachedcli.h"
#include "cachelib.h"
#include "config.h"
#include "debug.h"
#include "log.h"
#include "parser.h"
#include "query.h"
#include "singletons.h"

#ifndef CONFIG_PATH
#define CONFIG_PATH "/etc/cached.conf"
#endif
#define DEFAULT_CONFIG_PATH	"cached.conf"

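/*
 * Requests and responses larger than this are transferred through the
 * temporary io_buffer in parts (see process_socket_event()).
 */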
#define MAX_SOCKET_IO_SIZE	4096

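/*
 * Each processing thread receives the shared cache, configuration and
 * runtime environment through this structure.
 */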
struct processing_thread_args {
	cache	the_cache;
	struct configuration	*the_configuration;
	struct runtime_env	*the_runtime_env;
};

static void accept_connection(struct kevent *, struct runtime_env *,
	struct configuration *);
static void destroy_cache_(cache);
static void destroy_runtime_env(struct runtime_env *);
static cache init_cache_(struct configuration *);
static struct runtime_env *init_runtime_env(struct configuration *);
static void print_version_info(void);
static void processing_loop(cache, struct runtime_env *,
	struct configuration *);
static void process_socket_event(struct kevent *, struct runtime_env *,
	struct configuration *);
static void process_timer_event(struct kevent *, struct runtime_env *,
	struct configuration *);
static void *processing_thread(void *);
static void usage(void);

void get_time_func(struct timeval *);

static void
print_version_info(void)
{
	TRACE_IN(print_version_info);
	printf("cached v0.2 (20 Oct 2005)\nwas developed during SoC 2005\n");
	TRACE_OUT(print_version_info);
}

static void
usage(void)
{
	fprintf(stderr, "usage: cached [-dnst] [-i cachename] [-I cachename]\n");
	exit(1);
}

static cache
init_cache_(struct configuration *config)
{
	struct cache_params params;
	cache retval;

	struct configuration_entry *config_entry;
	size_t	size, i;
	int res;

	TRACE_IN(init_cache_);

	memset(&params, 0, sizeof(struct cache_params));
	params.get_time_func = get_time_func;
	retval = init_cache(&params);

	size = configuration_get_entries_size(config);
	for (i = 0; i < size; ++i) {
		config_entry = configuration_get_entry(config, i);
		/*
		 * We register the common entries now - multipart entries
		 * will be registered automatically during queries.
		 */
		res = register_cache_entry(retval, (struct cache_entry_params *)
			&config_entry->positive_cache_params);
		config_entry->positive_cache_entry = find_cache_entry(retval,
			config_entry->positive_cache_params.entry_name);
		assert(config_entry->positive_cache_entry !=
			INVALID_CACHE_ENTRY);

		res = register_cache_entry(retval, (struct cache_entry_params *)
			&config_entry->negative_cache_params);
		config_entry->negative_cache_entry = find_cache_entry(retval,
			config_entry->negative_cache_params.entry_name);
		assert(config_entry->negative_cache_entry !=
			INVALID_CACHE_ENTRY);
	}

	LOG_MSG_2("cache", "cache was successfully initialized");
	TRACE_OUT(init_cache_);
	return (retval);
}

static void
destroy_cache_(cache the_cache)
{
	TRACE_IN(destroy_cache_);
	destroy_cache(the_cache);
	TRACE_OUT(destroy_cache_);
}

/*
 * The socket and the kqueue are prepared here. We have one global queue for
 * both socket and timer events.
 */
static struct runtime_env *
init_runtime_env(struct configuration *config)
{
	int serv_addr_len;
	struct sockaddr_un serv_addr;

	struct kevent eventlist;
	struct timespec timeout;

	struct runtime_env *retval;

	TRACE_IN(init_runtime_env);
	retval = (struct runtime_env *)malloc(sizeof(struct runtime_env));
	assert(retval != NULL);
	memset(retval, 0, sizeof(struct runtime_env));

	retval->sockfd = socket(PF_LOCAL, SOCK_STREAM, 0);

	if (config->force_unlink == 1)
		unlink(config->socket_path);

	memset(&serv_addr, 0, sizeof(struct sockaddr_un));
	serv_addr.sun_family = PF_LOCAL;
	strncpy(serv_addr.sun_path, config->socket_path,
		sizeof(serv_addr.sun_path));
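	/* Address length: the family field plus the NUL-terminated path. */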
	serv_addr_len = sizeof(serv_addr.sun_family) +
		strlen(serv_addr.sun_path) + 1;

	if (bind(retval->sockfd, (struct sockaddr *)&serv_addr,
		serv_addr_len) == -1) {
		close(retval->sockfd);
		free(retval);

		LOG_ERR_2("runtime environment", "can't bind socket to path: "
			"%s", config->socket_path);
		TRACE_OUT(init_runtime_env);
		return (NULL);
	}
	LOG_MSG_2("runtime environment", "using socket %s",
		config->socket_path);

	/*
	 * Here we mark the socket as non-blocking and set its backlog to
	 * the maximum value.
	 */
	chmod(config->socket_path, config->socket_mode);
	listen(retval->sockfd, -1);
	fcntl(retval->sockfd, F_SETFL, O_NONBLOCK);

	retval->queue = kqueue();
	assert(retval->queue != -1);

	EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT,
		0, 0, 0);
	memset(&timeout, 0, sizeof(struct timespec));
	kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout);

	LOG_MSG_2("runtime environment", "successfully initialized");
	TRACE_OUT(init_runtime_env);
	return (retval);
}

static void
destroy_runtime_env(struct runtime_env *env)
{
	TRACE_IN(destroy_runtime_env);
	close(env->queue);
	close(env->sockfd);
	free(env);
	TRACE_OUT(destroy_runtime_env);
}

static void
accept_connection(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct kevent	eventlist[2];
	struct timespec	timeout;
	struct query_state	*qstate;

	int	fd;
	int	res;

	uid_t	euid;
	gid_t	egid;

	TRACE_IN(accept_connection);
	fd = accept(event_data->ident, NULL, NULL);
	if (fd == -1) {
		LOG_ERR_2("accept_connection", "error %d during accept()",
		    errno);
		TRACE_OUT(accept_connection);
		return;
	}

	if (getpeereid(fd, &euid, &egid) != 0) {
		LOG_ERR_2("accept_connection", "error %d during getpeereid()",
			errno);
		close(fd);
		TRACE_OUT(accept_connection);
		return;
	}

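	/*
	 * Allocate the per-connection query state; sizeof(int) is the amount
	 * of data we expect to read first (presumably the request type).
	 */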
	qstate = init_query_state(fd, sizeof(int), euid, egid);
	if (qstate == NULL) {
		LOG_ERR_2("accept_connection", "can't init query_state");
		close(fd);
		TRACE_OUT(accept_connection);
		return;
	}

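	/*
	 * Register two one-shot events for the new connection: a timer that
	 * limits the query lifetime and a read filter with a low watermark
	 * equal to the amount of data we expect.
	 */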
	memset(&timeout, 0, sizeof(struct timespec));
	EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
		0, qstate->timeout.tv_sec * 1000, qstate);
	EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT,
		NOTE_LOWAT, qstate->kevent_watermark, qstate);
	res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout);
	if (res < 0)
		LOG_ERR_2("accept_connection", "kevent error");

	TRACE_OUT(accept_connection);
}

static void
process_socket_event(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct kevent	eventlist[2];
	struct timeval	query_timeout;
	struct timespec	kevent_timeout;
	int	nevents;
	int	eof_res, res;
	ssize_t	io_res;
	struct query_state *qstate;

	TRACE_IN(process_socket_event);
	eof_res = event_data->flags & EV_EOF ? 1 : 0;
	res = 0;

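	/*
	 * Remove the pending timer for this descriptor first; if it is
	 * already gone (ENOENT), the timer handler owns the query state and
	 * we must not touch it.
	 */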
	memset(&kevent_timeout, 0, sizeof(struct timespec));
	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE,
		0, 0, NULL);
	nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout);
	if (nevents == -1) {
		if (errno == ENOENT) {
			/* the timer is already handling this event */
			TRACE_OUT(process_socket_event);
			return;
		} else {
			/* some other error happened */
			LOG_ERR_2("process_socket_event", "kevent error, errno"
				" is %d", errno);
			TRACE_OUT(process_socket_event);
			return;
		}
	}
	qstate = (struct query_state *)event_data->udata;

	/*
	 * If the buffer that is to be sent/received is too large, we send it
	 * implicitly, using the query_io_buffer_read and
	 * query_io_buffer_write functions in the query_state. These functions
	 * use a temporary buffer, which is later sent/received in parts.
	 * The code below implements buffer splitting/merging for send/receive
	 * operations. It also does the actual socket I/O operations.
	 */
	if (((qstate->use_alternate_io == 0) &&
		(qstate->kevent_watermark <= event_data->data)) ||
		((qstate->use_alternate_io != 0) &&
		(qstate->io_buffer_watermark <= event_data->data))) {
		if (qstate->use_alternate_io != 0) {
			switch (qstate->io_buffer_filter) {
			case EVFILT_READ:
				io_res = query_socket_read(qstate,
					qstate->io_buffer_p,
					qstate->io_buffer_watermark);
				if (io_res < 0) {
					qstate->use_alternate_io = 0;
					qstate->process_func = NULL;
				} else {
					qstate->io_buffer_p += io_res;
					if (qstate->io_buffer_p ==
						qstate->io_buffer +
						qstate->io_buffer_size) {
						qstate->io_buffer_p =
						    qstate->io_buffer;
						qstate->use_alternate_io = 0;
					}
				}
				break;
			default:
				break;
			}
		}

		if (qstate->use_alternate_io == 0) {
			do {
				res = qstate->process_func(qstate);
			} while ((qstate->kevent_watermark == 0) &&
					(qstate->process_func != NULL) &&
					(res == 0));

			if (res != 0)
				qstate->process_func = NULL;
		}

		if ((qstate->use_alternate_io != 0) &&
			(qstate->io_buffer_filter == EVFILT_WRITE)) {
			io_res = query_socket_write(qstate, qstate->io_buffer_p,
				qstate->io_buffer_watermark);
			if (io_res < 0) {
				qstate->use_alternate_io = 0;
				qstate->process_func = NULL;
			} else
				qstate->io_buffer_p += io_res;
		}
	} else {
		/* assuming that socket was closed */
		qstate->process_func = NULL;
		qstate->use_alternate_io = 0;
	}

	if (((qstate->process_func == NULL) &&
		(qstate->use_alternate_io == 0)) ||
		(eof_res != 0) || (res != 0)) {
		destroy_query_state(qstate);
		close(event_data->ident);
		TRACE_OUT(process_socket_event);
		return;
	}

	/* updating the query_state lifetime variable */
	get_time_func(&query_timeout);
	query_timeout.tv_usec = 0;
	query_timeout.tv_sec -= qstate->creation_time.tv_sec;
	if (query_timeout.tv_sec > qstate->timeout.tv_sec)
		query_timeout.tv_sec = 0;
	else
		query_timeout.tv_sec = qstate->timeout.tv_sec -
			query_timeout.tv_sec;

	if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p ==
		qstate->io_buffer + qstate->io_buffer_size))
		qstate->use_alternate_io = 0;

	if (qstate->use_alternate_io == 0) {
		/*
		 * If we have to send/receive a large block of data, we
		 * prepare the query_state's io_XXX fields and substitute its
		 * write_func and read_func with query_io_buffer_write and
		 * query_io_buffer_read, which allows us to send/receive this
		 * large buffer implicitly during the subsequent calls to
		 * process_socket_event().
		 */
		if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) {
			if (qstate->io_buffer != NULL)
				free(qstate->io_buffer);

			qstate->io_buffer = (char *)malloc(
				qstate->kevent_watermark);
			assert(qstate->io_buffer != NULL);
			memset(qstate->io_buffer, 0, qstate->kevent_watermark);

			qstate->io_buffer_p = qstate->io_buffer;
			qstate->io_buffer_size = qstate->kevent_watermark;
			qstate->io_buffer_filter = qstate->kevent_filter;

			qstate->write_func = query_io_buffer_write;
			qstate->read_func = query_io_buffer_read;

			if (qstate->kevent_filter == EVFILT_READ)
				qstate->use_alternate_io = 1;

			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
			EV_SET(&eventlist[1], event_data->ident,
				qstate->kevent_filter, EV_ADD | EV_ONESHOT,
				NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
		} else {
			EV_SET(&eventlist[1], event_data->ident,
				qstate->kevent_filter, EV_ADD | EV_ONESHOT,
				NOTE_LOWAT, qstate->kevent_watermark, qstate);
		}
	} else {
		if (qstate->io_buffer + qstate->io_buffer_size -
			qstate->io_buffer_p < MAX_SOCKET_IO_SIZE) {
			qstate->io_buffer_watermark = qstate->io_buffer +
				qstate->io_buffer_size - qstate->io_buffer_p;
			EV_SET(&eventlist[1], event_data->ident,
				qstate->io_buffer_filter,
				EV_ADD | EV_ONESHOT, NOTE_LOWAT,
				qstate->io_buffer_watermark,
				qstate);
		} else {
			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
			EV_SET(&eventlist[1], event_data->ident,
				qstate->io_buffer_filter, EV_ADD | EV_ONESHOT,
				NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
		}
	}
	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER,
		EV_ADD | EV_ONESHOT, 0, query_timeout.tv_sec * 1000, qstate);
	kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout);

	TRACE_OUT(process_socket_event);
}

/*
 * This routine is called when a timer event has fired in the kqueue. It
 * just closes the socket and destroys the query_state.
 */
static void
process_timer_event(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct query_state	*qstate;

	TRACE_IN(process_timer_event);
	qstate = (struct query_state *)event_data->udata;
	destroy_query_state(qstate);
	close(event_data->ident);
	TRACE_OUT(process_timer_event);
}

/*
 * The processing loop is the basic processing routine that forms the body
 * of each processing thread.
 */
static void
processing_loop(cache the_cache, struct runtime_env *env,
	struct configuration *config)
{
	struct timespec timeout;
	const int eventlist_size = 1;
	struct kevent eventlist[eventlist_size];
	int nevents, i;

	TRACE_MSG("=> processing_loop");
	memset(&timeout, 0, sizeof(struct timespec));
	memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size);

	for (;;) {
		nevents = kevent(env->queue, NULL, 0, eventlist,
			eventlist_size, NULL);
		/*
		 * we can receive at most one event on success
		 */
		if (nevents == 1) {
			struct kevent *event_data;
			event_data = &eventlist[0];

			if (event_data->ident == env->sockfd) {
				for (i = 0; i < event_data->data; ++i)
				    accept_connection(event_data, env, config);

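				/*
				 * The listening socket was registered with
				 * EV_ONESHOT, so re-arm its read event after
				 * accepting the pending connections.
				 */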
				EV_SET(eventlist, s_runtime_env->sockfd,
				    EVFILT_READ, EV_ADD | EV_ONESHOT,
				    0, 0, 0);
				memset(&timeout, 0,
				    sizeof(struct timespec));
				kevent(s_runtime_env->queue, eventlist,
				    1, NULL, 0, &timeout);

			} else {
				switch (event_data->filter) {
				case EVFILT_READ:
				case EVFILT_WRITE:
					process_socket_event(event_data,
						env, config);
					break;
				case EVFILT_TIMER:
					process_timer_event(event_data,
						env, config);
					break;
				default:
					break;
				}
			}
		} else {
			/* this branch should not currently be reached */
		}
	}

	TRACE_MSG("<= processing_loop");
}

/*
 * Wrapper around the processing loop function. It sets the thread signal
 * mask to block SIGPIPE signals (which can be delivered if a client
 * misbehaves).
 */
static void *
processing_thread(void *data)
{
	struct processing_thread_args	*args;
	sigset_t new;

	TRACE_MSG("=> processing_thread");
	args = (struct processing_thread_args *)data;

	sigemptyset(&new);
	sigaddset(&new, SIGPIPE);
	if (pthread_sigmask(SIG_BLOCK, &new, NULL) != 0)
		LOG_ERR_1("processing thread",
			"thread can't block the SIGPIPE signal");

	processing_loop(args->the_cache, args->the_runtime_env,
		args->the_configuration);
	free(args);
	TRACE_MSG("<= processing_thread");

	return (NULL);
}

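/*
 * Time source used by the cache: the monotonic clock, truncated to whole
 * seconds.
 */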
void
get_time_func(struct timeval *time)
{
	struct timespec res;
	memset(&res, 0, sizeof(struct timespec));
	clock_gettime(CLOCK_MONOTONIC, &res);

	time->tv_sec = res.tv_sec;
	time->tv_usec = 0;
}

/*
 * The idea behind _nss_cache_cycle_prevention_function is that nsdispatch
 * searches for this symbol in the executable; its presence marks the caching
 * daemon itself. If the symbol exists, nsdispatch won't try to connect to
 * the caching daemon and will simply ignore the 'cache' source in
 * nsswitch.conf. This prevents the daemon's own lookups from being routed
 * back to itself.
 */
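/*
 * For illustration, a typical nsswitch.conf line that routes lookups through
 * the daemon looks roughly like:
 *
 *	passwd: cache files
 *
 * With the symbol below present, the daemon's own nsdispatch calls skip the
 * 'cache' source and go straight to 'files'.
 */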
void
_nss_cache_cycle_prevention_function(void)
{
}

int
main(int argc, char *argv[])
{
	struct processing_thread_args *thread_args;
	pthread_t *threads;

	struct pidfh *pidfile;
	pid_t pid;

	char const *config_file;
	char const *error_str;
	int error_line;
	int i, res;

	int trace_mode_enabled;
	int force_single_threaded;
	int do_not_daemonize;
	int clear_user_cache_entries, clear_all_cache_entries;
	char *user_config_entry_name, *global_config_entry_name;
	int show_statistics;
	int daemon_mode, interactive_mode;

	/* by default all debug messages are omitted */
	TRACE_OFF();

	/* startup output */
	print_version_info();

	/* parsing command line arguments */
	trace_mode_enabled = 0;
	force_single_threaded = 0;
	do_not_daemonize = 0;
	clear_user_cache_entries = 0;
	clear_all_cache_entries = 0;
	show_statistics = 0;
	user_config_entry_name = NULL;
	global_config_entry_name = NULL;
	while ((res = getopt(argc, argv, "nstdi:I:")) != -1) {
		switch (res) {
		case 'n':
			do_not_daemonize = 1;
			break;
		case 's':
			force_single_threaded = 1;
			break;
		case 't':
			trace_mode_enabled = 1;
			break;
		case 'i':
			clear_user_cache_entries = 1;
			if (optarg != NULL)
				if (strcmp(optarg, "all") != 0)
					user_config_entry_name = strdup(optarg);
			break;
		case 'I':
			clear_all_cache_entries = 1;
			if (optarg != NULL)
				if (strcmp(optarg, "all") != 0)
					global_config_entry_name =
						strdup(optarg);
			break;
		case 'd':
			show_statistics = 1;
			break;
		case '?':
		default:
			usage();
			/* NOT REACHED */
		}
	}

	daemon_mode = do_not_daemonize | force_single_threaded |
		trace_mode_enabled;
	interactive_mode = clear_user_cache_entries | clear_all_cache_entries |
		show_statistics;

	if ((daemon_mode != 0) && (interactive_mode != 0)) {
		LOG_ERR_1("main", "daemon mode and interactive mode arguments "
			"can't be used together");
		usage();
	}

	if (interactive_mode != 0) {
		FILE *pidfin = fopen(DEFAULT_PIDFILE_PATH, "r");
		char pidbuf[256];

		struct cached_connection_params connection_params;
		cached_connection connection;

		int result;

		if (pidfin == NULL)
			errx(EXIT_FAILURE, "There is no daemon running.");

		memset(pidbuf, 0, sizeof(pidbuf));
		fread(pidbuf, sizeof(pidbuf) - 1, 1, pidfin);
		if (ferror(pidfin) != 0)
			errx(EXIT_FAILURE, "Can't read from pidfile.");
		fclose(pidfin);

		if (sscanf(pidbuf, "%d", &pid) != 1)
			errx(EXIT_FAILURE, "Invalid pidfile.");
		LOG_MSG_1("main", "daemon PID is %d", pid);

		memset(&connection_params, 0,
			sizeof(struct cached_connection_params));
		connection_params.socket_path = DEFAULT_SOCKET_PATH;
		connection = open_cached_connection__(&connection_params);
		if (connection == INVALID_CACHED_CONNECTION)
			errx(EXIT_FAILURE, "Can't connect to the daemon.");

		if (clear_user_cache_entries != 0) {
			result = cached_transform__(connection,
				user_config_entry_name, TT_USER);
			if (result != 0)
				LOG_MSG_1("main",
					"user cache transformation failed");
			else
				LOG_MSG_1("main",
					"user cache transformation "
					"succeeded");
		}

		if (clear_all_cache_entries != 0) {
			if (geteuid() != 0)
				errx(EXIT_FAILURE, "Only root can initiate "
					"global cache transformation.");

			result = cached_transform__(connection,
				global_config_entry_name, TT_ALL);
			if (result != 0)
				LOG_MSG_1("main",
					"global cache transformation "
					"failed");
			else
				LOG_MSG_1("main",
					"global cache transformation "
					"succeeded");
		}

		close_cached_connection__(connection);

		free(user_config_entry_name);
		free(global_config_entry_name);
		return (EXIT_SUCCESS);
	}

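	/*
	 * Daemon mode: acquire the pidfile before daemonizing so that a
	 * second instance is detected early.
	 */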
	pidfile = pidfile_open(DEFAULT_PIDFILE_PATH, 0644, &pid);
	if (pidfile == NULL) {
		if (errno == EEXIST)
			errx(EXIT_FAILURE, "Daemon already running, pid: %d.",
				pid);
		warn("Cannot open or create pidfile");
	}

	if (trace_mode_enabled == 1)
		TRACE_ON();

	/* blocking the main thread from receiving SIGPIPE signal */
	sigblock(sigmask(SIGPIPE));

	/* daemonization */
	if (do_not_daemonize == 0) {
		res = daemon(0, trace_mode_enabled == 0 ? 0 : 1);
		if (res != 0) {
			LOG_ERR_1("main", "can't daemonize myself: %s",
				strerror(errno));
			pidfile_remove(pidfile);
			goto fin;
		} else
			LOG_MSG_1("main", "successfully daemonized");
	}

	pidfile_write(pidfile);

	s_agent_table = init_agent_table();
	register_agent(s_agent_table, init_passwd_agent());
	register_agent(s_agent_table, init_passwd_mp_agent());
	register_agent(s_agent_table, init_group_agent());
	register_agent(s_agent_table, init_group_mp_agent());
	register_agent(s_agent_table, init_services_agent());
	register_agent(s_agent_table, init_services_mp_agent());
	LOG_MSG_1("main", "request agents registered successfully");

	/*
	 * Hosts agent can't work properly until we have access to the
	 * appropriate dtab structures, which are used in nsdispatch
	 * calls
	 *
	 register_agent(s_agent_table, init_hosts_agent());
	*/

	/* configuration initialization */
	s_configuration = init_configuration();
	fill_configuration_defaults(s_configuration);

	error_str = NULL;
	error_line = 0;
	config_file = CONFIG_PATH;

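	/*
	 * Try the system-wide configuration file first; if parsing fails
	 * without reporting an error string (presumably because the file
	 * could not be read), fall back to DEFAULT_CONFIG_PATH in the
	 * current directory.
	 */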
	res = parse_config_file(s_configuration, config_file, &error_str,
		&error_line);
	if ((res != 0) && (error_str == NULL)) {
		config_file = DEFAULT_CONFIG_PATH;
		res = parse_config_file(s_configuration, config_file,
			&error_str, &error_line);
	}

	if (res != 0) {
		if (error_str != NULL) {
		LOG_ERR_1("main", "error in configuration file (%s, %d): %s\n",
			config_file, error_line, error_str);
		} else {
		LOG_ERR_1("main", "no configuration file found "
			"- was looking for %s and %s",
			CONFIG_PATH, DEFAULT_CONFIG_PATH);
		}
		destroy_configuration(s_configuration);
		return (-1);
	}

	if (force_single_threaded == 1)
		s_configuration->threads_num = 1;

	/* cache initialization */
	s_cache = init_cache_(s_configuration);
	if (s_cache == NULL) {
		LOG_ERR_1("main", "can't initialize the cache");
		destroy_configuration(s_configuration);
		return (-1);
	}

	/* runtime environment initialization */
	s_runtime_env = init_runtime_env(s_configuration);
	if (s_runtime_env == NULL) {
		LOG_ERR_1("main", "can't initialize the runtime environment");
		destroy_configuration(s_configuration);
		destroy_cache_(s_cache);
		return (-1);
	}

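	/*
	 * In multi-threaded mode spawn threads_num worker threads, each
	 * running processing_loop() on the shared kqueue and cache;
	 * otherwise run the loop directly in the main thread.
	 */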
	if (s_configuration->threads_num > 1) {
		threads = (pthread_t *)malloc(sizeof(pthread_t) *
			s_configuration->threads_num);
		memset(threads, 0, sizeof(pthread_t) *
			s_configuration->threads_num);
		for (i = 0; i < s_configuration->threads_num; ++i) {
			thread_args = (struct processing_thread_args *)malloc(
				sizeof(struct processing_thread_args));
			thread_args->the_cache = s_cache;
			thread_args->the_runtime_env = s_runtime_env;
			thread_args->the_configuration = s_configuration;

			LOG_MSG_1("main", "thread #%d was successfully created",
				i);
			pthread_create(&threads[i], NULL, processing_thread,
				thread_args);

			thread_args = NULL;
		}

		for (i = 0; i < s_configuration->threads_num; ++i)
			pthread_join(threads[i], NULL);
	} else {
		LOG_MSG_1("main", "working in single-threaded mode");
		processing_loop(s_cache, s_runtime_env, s_configuration);
	}

fin:
	/* runtime environment destruction */
	destroy_runtime_env(s_runtime_env);

	/* cache destruction */
	destroy_cache_(s_cache);

	/* configuration destruction */
	destroy_configuration(s_configuration);

	/* agents table destruction */
	destroy_agent_table(s_agent_table);

	pidfile_remove(pidfile);
	return (EXIT_SUCCESS);
}
