/*-
 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/usr.sbin/nscd/nscd.c 315599 2017-03-20 00:54:45Z pfg $");

#include <sys/param.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/un.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "agents/passwd.h"
#include "agents/group.h"
#include "agents/services.h"
#include "cachelib.h"
#include "config.h"
#include "debug.h"
#include "log.h"
#include "nscdcli.h"
#include "parser.h"
#include "query.h"
#include "singletons.h"

#ifndef CONFIG_PATH
#define CONFIG_PATH "/etc/nscd.conf"
#endif
#define DEFAULT_CONFIG_PATH	"nscd.conf"

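/*
 * Requests and responses larger than MAX_SOCKET_IO_SIZE bytes are not
 * transferred in one piece; process_socket_event() switches to the
 * alternate I/O path and moves them through a temporary buffer in
 * MAX_SOCKET_IO_SIZE-byte chunks.
 */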
#define MAX_SOCKET_IO_SIZE	4096

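/*
 * Arguments handed to each processing thread.  A separate instance is
 * allocated for every thread in main() and freed by the thread itself in
 * processing_thread(); all three members point to state shared by the
 * whole daemon.
 */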
struct processing_thread_args {
	cache	the_cache;
	struct configuration	*the_configuration;
	struct runtime_env		*the_runtime_env;
};

static void accept_connection(struct kevent *, struct runtime_env *,
	struct configuration *);
static void destroy_cache_(cache);
static void destroy_runtime_env(struct runtime_env *);
static cache init_cache_(struct configuration *);
static struct runtime_env *init_runtime_env(struct configuration *);
static void processing_loop(cache, struct runtime_env *,
	struct configuration *);
static void process_socket_event(struct kevent *, struct runtime_env *,
	struct configuration *);
static void process_timer_event(struct kevent *, struct runtime_env *,
	struct configuration *);
static void *processing_thread(void *);
static void usage(void);

void get_time_func(struct timeval *);

static void
usage(void)
{
	fprintf(stderr,
	    "usage: nscd [-dnst] [-i cachename] [-I cachename]\n");
	exit(1);
}

static cache
init_cache_(struct configuration *config)
{
	struct cache_params params;
	cache retval;

	struct configuration_entry *config_entry;
	size_t	size, i;
	int res;

	TRACE_IN(init_cache_);

	memset(&params, 0, sizeof(struct cache_params));
	params.get_time_func = get_time_func;
	retval = init_cache(&params);

	size = configuration_get_entries_size(config);
	for (i = 0; i < size; ++i) {
		config_entry = configuration_get_entry(config, i);
		/*
		 * Register the common entries now; multipart entries are
		 * registered automatically during the queries.
		 */
		res = register_cache_entry(retval, (struct cache_entry_params *)
			&config_entry->positive_cache_params);
		config_entry->positive_cache_entry = find_cache_entry(retval,
			config_entry->positive_cache_params.cep.entry_name);
		assert(config_entry->positive_cache_entry !=
			INVALID_CACHE_ENTRY);

		res = register_cache_entry(retval, (struct cache_entry_params *)
			&config_entry->negative_cache_params);
		config_entry->negative_cache_entry = find_cache_entry(retval,
			config_entry->negative_cache_params.cep.entry_name);
		assert(config_entry->negative_cache_entry !=
			INVALID_CACHE_ENTRY);
	}

	LOG_MSG_2("cache", "cache was successfully initialized");
	TRACE_OUT(init_cache_);
	return (retval);
}

static void
destroy_cache_(cache the_cache)
{
	TRACE_IN(destroy_cache_);
	destroy_cache(the_cache);
	TRACE_OUT(destroy_cache_);
}

/*
 * The socket and the kqueue are prepared here. We have one global queue for
 * both socket and timer events.
 */
static struct runtime_env *
init_runtime_env(struct configuration *config)
{
	int serv_addr_len;
	struct sockaddr_un serv_addr;

	struct kevent eventlist;
	struct timespec timeout;

	struct runtime_env *retval;

	TRACE_IN(init_runtime_env);
	retval = calloc(1, sizeof(*retval));
	assert(retval != NULL);

	retval->sockfd = socket(PF_LOCAL, SOCK_STREAM, 0);

	if (config->force_unlink == 1)
		unlink(config->socket_path);

	memset(&serv_addr, 0, sizeof(struct sockaddr_un));
	serv_addr.sun_family = PF_LOCAL;
	strlcpy(serv_addr.sun_path, config->socket_path,
		sizeof(serv_addr.sun_path));
	serv_addr_len = sizeof(serv_addr.sun_family) +
		strlen(serv_addr.sun_path) + 1;

	if (bind(retval->sockfd, (struct sockaddr *)&serv_addr,
		serv_addr_len) == -1) {
		close(retval->sockfd);
		free(retval);

		LOG_ERR_2("runtime environment", "can't bind socket to path: "
			"%s", config->socket_path);
		TRACE_OUT(init_runtime_env);
		return (NULL);
	}
	LOG_MSG_2("runtime environment", "using socket %s",
		config->socket_path);

	/*
	 * Here we mark the socket as non-blocking and set its backlog to
	 * the maximum value.
	 */
	chmod(config->socket_path, config->socket_mode);
	listen(retval->sockfd, -1);
	fcntl(retval->sockfd, F_SETFL, O_NONBLOCK);

	retval->queue = kqueue();
	assert(retval->queue != -1);

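	/*
	 * Register a one-shot read filter for the listening socket; it is
	 * re-armed in processing_loop() after each batch of accepted
	 * connections.
	 */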
	EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT,
		0, 0, 0);
	memset(&timeout, 0, sizeof(struct timespec));
	kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout);

	LOG_MSG_2("runtime environment", "successfully initialized");
	TRACE_OUT(init_runtime_env);
	return (retval);
}

static void
destroy_runtime_env(struct runtime_env *env)
{
	TRACE_IN(destroy_runtime_env);
	close(env->queue);
	close(env->sockfd);
	free(env);
	TRACE_OUT(destroy_runtime_env);
}

static void
accept_connection(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct kevent	eventlist[2];
	struct timespec	timeout;
	struct query_state	*qstate;

	int	fd;
	int	res;

	uid_t	euid;
	gid_t	egid;

	TRACE_IN(accept_connection);
	fd = accept(event_data->ident, NULL, NULL);
	if (fd == -1) {
		LOG_ERR_2("accept_connection", "error %d during accept()",
		    errno);
		TRACE_OUT(accept_connection);
		return;
	}

	if (getpeereid(fd, &euid, &egid) != 0) {
		LOG_ERR_2("accept_connection", "error %d during getpeereid()",
			errno);
		close(fd);
		TRACE_OUT(accept_connection);
		return;
	}

	qstate = init_query_state(fd, sizeof(int), euid, egid);
	if (qstate == NULL) {
		LOG_ERR_2("accept_connection", "can't init query_state");
		close(fd);
		TRACE_OUT(accept_connection);
		return;
	}

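	/*
	 * Register a one-shot inactivity timer and a one-shot read filter
	 * (with a low watermark) for the new connection; the query_state
	 * pointer travels in udata.
	 */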
	memset(&timeout, 0, sizeof(struct timespec));
	EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
		0, qstate->timeout.tv_sec * 1000, qstate);
	EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT,
		NOTE_LOWAT, qstate->kevent_watermark, qstate);
	res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout);
	if (res < 0)
		LOG_ERR_2("accept_connection", "kevent error");

	TRACE_OUT(accept_connection);
}

static void
process_socket_event(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct kevent	eventlist[2];
	struct timeval	query_timeout;
	struct timespec	kevent_timeout;
	int	nevents;
	int	eof_res, res;
	ssize_t	io_res;
	struct query_state *qstate;

	TRACE_IN(process_socket_event);
	eof_res = event_data->flags & EV_EOF ? 1 : 0;
	res = 0;

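	/*
	 * Delete the inactivity timer armed for this descriptor.  If the
	 * deletion fails with ENOENT, the timer has already fired and
	 * process_timer_event() is tearing the connection down, so there is
	 * nothing left to do here.
	 */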
	memset(&kevent_timeout, 0, sizeof(struct timespec));
	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE,
		0, 0, NULL);
	nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout);
	if (nevents == -1) {
		if (errno == ENOENT) {
			/* the timer is already handling this event */
			TRACE_OUT(process_socket_event);
			return;
		} else {
			/* some other error happened */
			LOG_ERR_2("process_socket_event", "kevent error, errno"
				" is %d", errno);
			TRACE_OUT(process_socket_event);
			return;
		}
	}
	qstate = (struct query_state *)event_data->udata;

	/*
	 * If the buffer that is to be sent/received is too large, we
	 * transfer it implicitly, by using the query_io_buffer_read and
	 * query_io_buffer_write functions in the query_state. These functions
	 * use a temporary buffer, which is later sent/received in parts.
	 * The code below implements buffer splitting/merging for send/receive
	 * operations. It also does the actual socket I/O operations.
	 */
	if (((qstate->use_alternate_io == 0) &&
	    (qstate->kevent_watermark <= (size_t)event_data->data)) ||
	    ((qstate->use_alternate_io != 0) &&
	    (qstate->io_buffer_watermark <= (size_t)event_data->data))) {
		if (qstate->use_alternate_io != 0) {
			switch (qstate->io_buffer_filter) {
			case EVFILT_READ:
				io_res = query_socket_read(qstate,
					qstate->io_buffer_p,
					qstate->io_buffer_watermark);
				if (io_res < 0) {
					qstate->use_alternate_io = 0;
					qstate->process_func = NULL;
				} else {
					qstate->io_buffer_p += io_res;
					if (qstate->io_buffer_p ==
					    qstate->io_buffer +
					    qstate->io_buffer_size) {
						qstate->io_buffer_p =
						    qstate->io_buffer;
						qstate->use_alternate_io = 0;
					}
				}
				break;
			default:
				break;
			}
		}

		if (qstate->use_alternate_io == 0) {
			do {
				res = qstate->process_func(qstate);
			} while ((qstate->kevent_watermark == 0) &&
					(qstate->process_func != NULL) &&
					(res == 0));

			if (res != 0)
				qstate->process_func = NULL;
		}

		if ((qstate->use_alternate_io != 0) &&
			(qstate->io_buffer_filter == EVFILT_WRITE)) {
			io_res = query_socket_write(qstate, qstate->io_buffer_p,
				qstate->io_buffer_watermark);
			if (io_res < 0) {
				qstate->use_alternate_io = 0;
				qstate->process_func = NULL;
			} else
				qstate->io_buffer_p += io_res;
		}
	} else {
		/* assume that the socket was closed */
		qstate->process_func = NULL;
		qstate->use_alternate_io = 0;
	}

	if (((qstate->process_func == NULL) &&
	    (qstate->use_alternate_io == 0)) ||
	    (eof_res != 0) || (res != 0)) {
		destroy_query_state(qstate);
		close(event_data->ident);
		TRACE_OUT(process_socket_event);
		return;
	}

	/* update the remaining lifetime of the query_state */
	get_time_func(&query_timeout);
	query_timeout.tv_usec = 0;
	query_timeout.tv_sec -= qstate->creation_time.tv_sec;
	if (query_timeout.tv_sec > qstate->timeout.tv_sec)
		query_timeout.tv_sec = 0;
	else
		query_timeout.tv_sec = qstate->timeout.tv_sec -
			query_timeout.tv_sec;

	if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p ==
	    qstate->io_buffer + qstate->io_buffer_size))
		qstate->use_alternate_io = 0;

	if (qstate->use_alternate_io == 0) {
		/*
		 * If we must send/receive a large block of data, we have to
		 * prepare the query_state's io_XXX fields and substitute its
		 * write_func and read_func with query_io_buffer_write and
		 * query_io_buffer_read, which allow us to transfer this large
		 * buffer implicitly in the subsequent calls to
		 * process_socket_event().
		 */
		if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) {
			if (qstate->io_buffer != NULL)
				free(qstate->io_buffer);

			qstate->io_buffer = calloc(1,
				qstate->kevent_watermark);
			assert(qstate->io_buffer != NULL);

			qstate->io_buffer_p = qstate->io_buffer;
			qstate->io_buffer_size = qstate->kevent_watermark;
			qstate->io_buffer_filter = qstate->kevent_filter;

			qstate->write_func = query_io_buffer_write;
			qstate->read_func = query_io_buffer_read;

			if (qstate->kevent_filter == EVFILT_READ)
				qstate->use_alternate_io = 1;

			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
			EV_SET(&eventlist[1], event_data->ident,
				qstate->kevent_filter, EV_ADD | EV_ONESHOT,
				NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
		} else {
			EV_SET(&eventlist[1], event_data->ident,
				qstate->kevent_filter, EV_ADD | EV_ONESHOT,
				NOTE_LOWAT, qstate->kevent_watermark, qstate);
		}
	} else {
		if (qstate->io_buffer + qstate->io_buffer_size -
		    qstate->io_buffer_p < MAX_SOCKET_IO_SIZE) {
			qstate->io_buffer_watermark = qstate->io_buffer +
				qstate->io_buffer_size - qstate->io_buffer_p;
			EV_SET(&eventlist[1], event_data->ident,
				qstate->io_buffer_filter,
				EV_ADD | EV_ONESHOT, NOTE_LOWAT,
				qstate->io_buffer_watermark,
				qstate);
		} else {
			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
			EV_SET(&eventlist[1], event_data->ident,
				qstate->io_buffer_filter, EV_ADD | EV_ONESHOT,
				NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
		}
	}
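	/*
	 * Re-arm the one-shot inactivity timer with the remaining lifetime
	 * and register the socket filter chosen above.
	 */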
	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER,
		EV_ADD | EV_ONESHOT, 0, query_timeout.tv_sec * 1000, qstate);
	kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout);

	TRACE_OUT(process_socket_event);
}

/*
 * This routine is called when a timer event has been signaled in the kqueue.
 * It just closes the socket and destroys the query_state.
 */
static void
process_timer_event(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct query_state	*qstate;

	TRACE_IN(process_timer_event);
	qstate = (struct query_state *)event_data->udata;
	destroy_query_state(qstate);
	close(event_data->ident);
	TRACE_OUT(process_timer_event);
}

/*
 * The processing loop is the basic processing routine that forms the body
 * of each processing thread.
 */
static void
processing_loop(cache the_cache, struct runtime_env *env,
	struct configuration *config)
{
	struct timespec timeout;
	const int eventlist_size = 1;
	struct kevent eventlist[eventlist_size];
	int nevents, i;

	TRACE_MSG("=> processing_loop");
	memset(&timeout, 0, sizeof(struct timespec));
	memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size);

	for (;;) {
		nevents = kevent(env->queue, NULL, 0, eventlist,
			eventlist_size, NULL);
		/*
		 * we can only receive 1 event on success
		 */
		if (nevents == 1) {
			struct kevent *event_data;
			event_data = &eventlist[0];

			if ((int)event_data->ident == env->sockfd) {
				for (i = 0; i < event_data->data; ++i)
					accept_connection(event_data, env,
					    config);

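				/*
				 * The read filter on the listening
				 * socket is one-shot, so re-arm it
				 * once the pending connections have
				 * been accepted.
				 */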
				EV_SET(eventlist, s_runtime_env->sockfd,
				    EVFILT_READ, EV_ADD | EV_ONESHOT,
				    0, 0, 0);
				memset(&timeout, 0,
				    sizeof(struct timespec));
				kevent(s_runtime_env->queue, eventlist,
				    1, NULL, 0, &timeout);

			} else {
				switch (event_data->filter) {
				case EVFILT_READ:
				case EVFILT_WRITE:
					process_socket_event(event_data,
						env, config);
					break;
				case EVFILT_TIMER:
					process_timer_event(event_data,
						env, config);
					break;
				default:
					break;
				}
			}
		} else {
			/* this branch shouldn't be currently executed */
		}
	}

	TRACE_MSG("<= processing_loop");
}

/*
 * Wrapper around the processing loop function. It sets the thread signal mask
 * to avoid SIGPIPE signals (which can happen if the client works incorrectly).
 */
static void *
processing_thread(void *data)
{
	struct processing_thread_args	*args;
	sigset_t new;

	TRACE_MSG("=> processing_thread");
	args = (struct processing_thread_args *)data;

	sigemptyset(&new);
	sigaddset(&new, SIGPIPE);
	if (pthread_sigmask(SIG_BLOCK, &new, NULL) != 0)
		LOG_ERR_1("processing thread",
			"thread can't block the SIGPIPE signal");

	processing_loop(args->the_cache, args->the_runtime_env,
		args->the_configuration);
	free(args);
	TRACE_MSG("<= processing_thread");

	return (NULL);
}

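/*
 * Time source handed to the cache (see init_cache_()) and used for the
 * query_state lifetime bookkeeping.  It returns monotonic time with second
 * granularity: tv_usec is always set to zero.
 */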
void
get_time_func(struct timeval *time)
{
	struct timespec res;
	memset(&res, 0, sizeof(struct timespec));
	clock_gettime(CLOCK_MONOTONIC, &res);

	time->tv_sec = res.tv_sec;
	time->tv_usec = 0;
}

/*
 * The idea of _nss_cache_cycle_prevention_function is that nsdispatch
 * will search for this symbol in the executable. This symbol is the
 * attribute of the caching daemon. So, if it exists, nsdispatch won't try
 * to connect to the caching daemon and will just ignore the 'cache'
 * source in the nsswitch.conf. This method helps to avoid cycles and
 * organize self-performing requests.
 *
 * (not actually a function; it used to be, but it doesn't make any
 * difference, as long as it has external linkage)
 */
void *_nss_cache_cycle_prevention_function;

int
main(int argc, char *argv[])
{
	struct processing_thread_args *thread_args;
	pthread_t *threads;

	struct pidfh *pidfile;
	pid_t pid;

	char const *config_file;
	char const *error_str;
	int error_line;
	int i, res;

	int trace_mode_enabled;
	int force_single_threaded;
	int do_not_daemonize;
	int clear_user_cache_entries, clear_all_cache_entries;
	char *user_config_entry_name, *global_config_entry_name;
	int show_statistics;
	int daemon_mode, interactive_mode;

	/* by default all debug messages are omitted */
	TRACE_OFF();

	/* parsing command line arguments */
	trace_mode_enabled = 0;
	force_single_threaded = 0;
	do_not_daemonize = 0;
	clear_user_cache_entries = 0;
	clear_all_cache_entries = 0;
	show_statistics = 0;
	user_config_entry_name = NULL;
	global_config_entry_name = NULL;
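	/*
	 * Option summary:
	 *   -n  do not daemonize
	 *   -s  force single-threaded operation
	 *   -t  enable trace mode
	 *   -i  clear the calling user's cache entries ("all" or a cache name)
	 *   -I  clear all cache entries ("all" or a cache name); root only
	 *   -d  show statistics
	 */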
	while ((res = getopt(argc, argv, "nstdi:I:")) != -1) {
		switch (res) {
		case 'n':
			do_not_daemonize = 1;
			break;
		case 's':
			force_single_threaded = 1;
			break;
		case 't':
			trace_mode_enabled = 1;
			break;
		case 'i':
			clear_user_cache_entries = 1;
			if (optarg != NULL)
				if (strcmp(optarg, "all") != 0)
					user_config_entry_name = strdup(optarg);
			break;
		case 'I':
			clear_all_cache_entries = 1;
			if (optarg != NULL)
				if (strcmp(optarg, "all") != 0)
					global_config_entry_name =
						strdup(optarg);
			break;
		case 'd':
			show_statistics = 1;
			break;
		case '?':
		default:
			usage();
			/* NOT REACHED */
		}
	}

	daemon_mode = do_not_daemonize | force_single_threaded |
		trace_mode_enabled;
	interactive_mode = clear_user_cache_entries | clear_all_cache_entries |
		show_statistics;

	if ((daemon_mode != 0) && (interactive_mode != 0)) {
		LOG_ERR_1("main", "daemon mode and interactive mode arguments "
			"can't be used together");
		usage();
	}

	if (interactive_mode != 0) {
		FILE *pidfin = fopen(DEFAULT_PIDFILE_PATH, "r");
		char pidbuf[256];

		struct nscd_connection_params connection_params;
		nscd_connection connection;

		int result;

		if (pidfin == NULL)
			errx(EXIT_FAILURE, "There is no daemon running.");

		memset(pidbuf, 0, sizeof(pidbuf));
		fread(pidbuf, sizeof(pidbuf) - 1, 1, pidfin);
		if (ferror(pidfin) != 0)
			errx(EXIT_FAILURE, "Can't read from pidfile.");
		fclose(pidfin);

		if (sscanf(pidbuf, "%d", &pid) != 1)
			errx(EXIT_FAILURE, "Invalid pidfile.");
		LOG_MSG_1("main", "daemon PID is %d", pid);

		memset(&connection_params, 0,
			sizeof(struct nscd_connection_params));
		connection_params.socket_path = DEFAULT_SOCKET_PATH;
		connection = open_nscd_connection__(&connection_params);
		if (connection == INVALID_NSCD_CONNECTION)
			errx(EXIT_FAILURE, "Can't connect to the daemon.");

		if (clear_user_cache_entries != 0) {
			result = nscd_transform__(connection,
				user_config_entry_name, TT_USER);
			if (result != 0)
				LOG_MSG_1("main",
					"user cache transformation failed");
			else
				LOG_MSG_1("main",
					"user cache transformation "
					"succeeded");
		}

		if (clear_all_cache_entries != 0) {
			if (geteuid() != 0)
				errx(EXIT_FAILURE, "Only root can initiate "
					"global cache transformation.");

			result = nscd_transform__(connection,
				global_config_entry_name, TT_ALL);
			if (result != 0)
				LOG_MSG_1("main",
					"global cache transformation "
					"failed");
			else
				LOG_MSG_1("main",
					"global cache transformation "
					"succeeded");
		}

		close_nscd_connection__(connection);

		free(user_config_entry_name);
		free(global_config_entry_name);
		return (EXIT_SUCCESS);
	}

	pidfile = pidfile_open(DEFAULT_PIDFILE_PATH, 0644, &pid);
	if (pidfile == NULL) {
		if (errno == EEXIST)
			errx(EXIT_FAILURE, "Daemon already running, pid: %d.",
				pid);
		warn("Cannot open or create pidfile");
	}

	if (trace_mode_enabled == 1)
		TRACE_ON();

	/* blocking the main thread from receiving SIGPIPE signal */
	sigblock(sigmask(SIGPIPE));

	/* daemonization */
	if (do_not_daemonize == 0) {
		res = daemon(0, trace_mode_enabled == 0 ? 0 : 1);
		if (res != 0) {
			LOG_ERR_1("main", "can't daemonize myself: %s",
				strerror(errno));
			pidfile_remove(pidfile);
			goto fin;
		} else
			LOG_MSG_1("main", "successfully daemonized");
	}

	pidfile_write(pidfile);

	s_agent_table = init_agent_table();
	register_agent(s_agent_table, init_passwd_agent());
	register_agent(s_agent_table, init_passwd_mp_agent());
	register_agent(s_agent_table, init_group_agent());
	register_agent(s_agent_table, init_group_mp_agent());
	register_agent(s_agent_table, init_services_agent());
	register_agent(s_agent_table, init_services_mp_agent());
	LOG_MSG_1("main", "request agents registered successfully");

	/*
	 * The hosts agent can't work properly until we have access to the
	 * appropriate dtab structures, which are used in nsdispatch
	 * calls
	 *
	 register_agent(s_agent_table, init_hosts_agent());
	*/

	/* configuration initialization */
	s_configuration = init_configuration();
	fill_configuration_defaults(s_configuration);

	error_str = NULL;
	error_line = 0;
	config_file = CONFIG_PATH;

	res = parse_config_file(s_configuration, config_file, &error_str,
		&error_line);
	if ((res != 0) && (error_str == NULL)) {
		config_file = DEFAULT_CONFIG_PATH;
		res = parse_config_file(s_configuration, config_file,
			&error_str, &error_line);
	}

	if (res != 0) {
		if (error_str != NULL) {
			LOG_ERR_1("main",
				"error in configuration file (%s, %d): %s\n",
				config_file, error_line, error_str);
		} else {
			LOG_ERR_1("main", "no configuration file found "
				"- was looking for %s and %s",
				CONFIG_PATH, DEFAULT_CONFIG_PATH);
		}
		destroy_configuration(s_configuration);
		return (-1);
	}

	if (force_single_threaded == 1)
		s_configuration->threads_num = 1;

	/* cache initialization */
	s_cache = init_cache_(s_configuration);
	if (s_cache == NULL) {
		LOG_ERR_1("main", "can't initialize the cache");
		destroy_configuration(s_configuration);
		return (-1);
	}

	/* runtime environment initialization */
	s_runtime_env = init_runtime_env(s_configuration);
	if (s_runtime_env == NULL) {
		LOG_ERR_1("main", "can't initialize the runtime environment");
		destroy_configuration(s_configuration);
		destroy_cache_(s_cache);
		return (-1);
	}

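	/*
	 * Spawn the configured number of worker threads, each running
	 * processing_loop() on the shared cache and runtime environment;
	 * with threads_num == 1 the loop runs in the main thread instead.
	 */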
	if (s_configuration->threads_num > 1) {
		threads = calloc(s_configuration->threads_num,
			sizeof(*threads));
		for (i = 0; i < s_configuration->threads_num; ++i) {
			thread_args = malloc(sizeof(*thread_args));
			thread_args->the_cache = s_cache;
			thread_args->the_runtime_env = s_runtime_env;
			thread_args->the_configuration = s_configuration;

			pthread_create(&threads[i], NULL, processing_thread,
				thread_args);
			LOG_MSG_1("main", "thread #%d was successfully created",
				i);

			thread_args = NULL;
		}

		for (i = 0; i < s_configuration->threads_num; ++i)
			pthread_join(threads[i], NULL);
	} else {
		LOG_MSG_1("main", "working in single-threaded mode");
		processing_loop(s_cache, s_runtime_env, s_configuration);
	}

fin:
	/* runtime environment destruction */
	destroy_runtime_env(s_runtime_env);

	/* cache destruction */
	destroy_cache_(s_cache);

	/* configuration destruction */
	destroy_configuration(s_configuration);

	/* agents table destruction */
	destroy_agent_table(s_agent_table);

	pidfile_remove(pidfile);
	return (EXIT_SUCCESS);
}