query.c revision 158115
1/*-
2 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/usr.sbin/nscd/query.c 158115 2006-04-28 12:03:38Z ume $");
30
31#include <sys/types.h>
32#include <sys/socket.h>
33#include <sys/time.h>
34#include <sys/event.h>
35#include <assert.h>
36#include <errno.h>
37#include <nsswitch.h>
38#include <stdio.h>
39#include <stdlib.h>
40#include <string.h>
41#include "config.h"
42#include "debug.h"
43#include "query.h"
44#include "log.h"
45#include "mp_ws_query.h"
46#include "mp_rs_query.h"
47#include "singletons.h"
48
49static const char negative_data[1] = { 0 };
50
51extern	void get_time_func(struct timeval *);
52
53static 	void clear_config_entry(struct configuration_entry *);
54static 	void clear_config_entry_part(struct configuration_entry *,
55	const char *, size_t);
56
57static	int on_query_startup(struct query_state *);
58static	void on_query_destroy(struct query_state *);
59
60static	int on_read_request_read1(struct query_state *);
61static	int on_read_request_read2(struct query_state *);
62static	int on_read_request_process(struct query_state *);
63static	int on_read_response_write1(struct query_state *);
64static	int on_read_response_write2(struct query_state *);
65
66static	int on_rw_mapper(struct query_state *);
67
68static	int on_transform_request_read1(struct query_state *);
69static	int on_transform_request_read2(struct query_state *);
70static	int on_transform_request_process(struct query_state *);
71static	int on_transform_response_write1(struct query_state *);
72
73static	int on_write_request_read1(struct query_state *);
74static	int on_write_request_read2(struct query_state *);
75static	int on_negative_write_request_process(struct query_state *);
76static	int on_write_request_process(struct query_state *);
77static	int on_write_response_write1(struct query_state *);
78
79/*
80 * Clears the specified configuration entry (clears the cache for positive and
81 * and negative entries) and also for all multipart entries.
82 */
83static void
84clear_config_entry(struct configuration_entry *config_entry)
85{
86	size_t i;
87
88	TRACE_IN(clear_config_entry);
89	configuration_lock_entry(config_entry, CELT_POSITIVE);
90	if (config_entry->positive_cache_entry != NULL)
91		transform_cache_entry(
92			config_entry->positive_cache_entry,
93			CTT_CLEAR);
94	configuration_unlock_entry(config_entry, CELT_POSITIVE);
95
96	configuration_lock_entry(config_entry, CELT_NEGATIVE);
97	if (config_entry->negative_cache_entry != NULL)
98		transform_cache_entry(
99			config_entry->negative_cache_entry,
100			CTT_CLEAR);
101	configuration_unlock_entry(config_entry, CELT_NEGATIVE);
102
103	configuration_lock_entry(config_entry, CELT_MULTIPART);
104	for (i = 0; i < config_entry->mp_cache_entries_size; ++i)
105		transform_cache_entry(
106			config_entry->mp_cache_entries[i],
107			CTT_CLEAR);
108	configuration_unlock_entry(config_entry, CELT_MULTIPART);
109
110	TRACE_OUT(clear_config_entry);
111}
112
/*
 * Clears the specified configuration entry by deleting only the elements,
 * that are owned by the user with specified eid_str.
 */
static void
clear_config_entry_part(struct configuration_entry *config_entry,
	const char *eid_str, size_t eid_str_length)
{
	cache_entry *start, *finish, *mp_entry;
	TRACE_IN(clear_config_entry_part);
	/* Cache keys are prefixed with the owner's eid string, so clearing
	 * only the keys that start with it (KPPT_LEFT) drops exactly this
	 * user's elements. */
	configuration_lock_entry(config_entry, CELT_POSITIVE);
	if (config_entry->positive_cache_entry != NULL)
		transform_cache_entry_part(
			config_entry->positive_cache_entry,
			CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
	configuration_unlock_entry(config_entry, CELT_POSITIVE);

	configuration_lock_entry(config_entry, CELT_NEGATIVE);
	if (config_entry->negative_cache_entry != NULL)
		transform_cache_entry_part(
			config_entry->negative_cache_entry,
			CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
	configuration_unlock_entry(config_entry, CELT_NEGATIVE);

	/* Multipart entries are looked up as a [start, finish) range of
	 * cache_entry pointers owned by this eid and cleared whole. */
	configuration_lock_entry(config_entry, CELT_MULTIPART);
	if (configuration_entry_find_mp_cache_entries(config_entry,
		eid_str, &start, &finish) == 0) {
		for (mp_entry = start; mp_entry != finish; ++mp_entry)
			transform_cache_entry(*mp_entry, CTT_CLEAR);
	}
	configuration_unlock_entry(config_entry, CELT_MULTIPART);

	TRACE_OUT(clear_config_entry_part);
}
147
148/*
149 * This function is assigned to the query_state structue on its creation.
150 * It's main purpose is to receive credentials from the client.
151 */
152static int
153on_query_startup(struct query_state *qstate)
154{
155	struct msghdr	cred_hdr;
156	struct iovec	iov;
157	int elem_type;
158
159	struct {
160		struct cmsghdr	hdr;
161		struct cmsgcred	creds;
162	} cmsg;
163
164	TRACE_IN(on_query_startup);
165	assert(qstate != NULL);
166
167	memset(&cred_hdr, 0, sizeof(struct msghdr));
168	cred_hdr.msg_iov = &iov;
169	cred_hdr.msg_iovlen = 1;
170	cred_hdr.msg_control = &cmsg;
171	cred_hdr.msg_controllen = sizeof(cmsg);
172
173	memset(&iov, 0, sizeof(struct iovec));
174	iov.iov_base = &elem_type;
175	iov.iov_len = sizeof(int);
176
177	if (recvmsg(qstate->sockfd, &cred_hdr, 0) == -1) {
178		TRACE_OUT(on_query_startup);
179		return (-1);
180	}
181
182	if (cmsg.hdr.cmsg_len != sizeof cmsg
183		|| cmsg.hdr.cmsg_level != SOL_SOCKET
184		|| cmsg.hdr.cmsg_type != SCM_CREDS) {
185		TRACE_OUT(on_query_startup);
186		return (-1);
187	}
188
189	qstate->uid = cmsg.creds.cmcred_uid;
190	qstate->gid = cmsg.creds.cmcred_gid;
191
192#if defined(NS_CACHED_EID_CHECKING) || defined(NS_STRICT_CACHED_EID_CHECKING)
193/*
194 * This check is probably a bit redundant - per-user cache is always separated
195 * by the euid/egid pair
196 */
197	if (check_query_eids(qstate) != 0) {
198#ifdef NS_STRICT_CACHED_EID_CHECKING
199		TRACE_OUT(on_query_startup);
200		return (-1);
201#else
202		if ((elem_type != CET_READ_REQUEST) &&
203			(elem_type != CET_MP_READ_SESSION_REQUEST) &&
204			(elem_type != CET_WRITE_REQUEST) &&
205			(elem_type != CET_MP_WRITE_SESSION_REQUEST)) {
206			TRACE_OUT(on_query_startup);
207			return (-1);
208		}
209#endif
210	}
211#endif
212
213	switch (elem_type) {
214	case CET_WRITE_REQUEST:
215		qstate->process_func = on_write_request_read1;
216		break;
217	case CET_READ_REQUEST:
218		qstate->process_func = on_read_request_read1;
219		break;
220	case CET_TRANSFORM_REQUEST:
221		qstate->process_func = on_transform_request_read1;
222		break;
223	case CET_MP_WRITE_SESSION_REQUEST:
224		qstate->process_func = on_mp_write_session_request_read1;
225		break;
226	case CET_MP_READ_SESSION_REQUEST:
227		qstate->process_func = on_mp_read_session_request_read1;
228		break;
229	default:
230		TRACE_OUT(on_query_startup);
231		return (-1);
232	}
233
234	qstate->kevent_watermark = 0;
235	TRACE_OUT(on_query_startup);
236	return (0);
237}
238
239/*
240 * on_rw_mapper is used to process multiple read/write requests during
241 * one connection session. It's never called in the beginning (on query_state
242 * creation) as it does not process the multipart requests and does not
243 * receive credentials
244 */
245static int
246on_rw_mapper(struct query_state *qstate)
247{
248	ssize_t	result;
249	int	elem_type;
250
251	TRACE_IN(on_rw_mapper);
252	if (qstate->kevent_watermark == 0) {
253		qstate->kevent_watermark = sizeof(int);
254	} else {
255		result = qstate->read_func(qstate, &elem_type, sizeof(int));
256		if (result != sizeof(int)) {
257			TRACE_OUT(on_rw_mapper);
258			return (-1);
259		}
260
261		switch (elem_type) {
262		case CET_WRITE_REQUEST:
263			qstate->kevent_watermark = sizeof(size_t);
264			qstate->process_func = on_write_request_read1;
265		break;
266		case CET_READ_REQUEST:
267			qstate->kevent_watermark = sizeof(size_t);
268			qstate->process_func = on_read_request_read1;
269		break;
270		default:
271			TRACE_OUT(on_rw_mapper);
272			return (-1);
273		break;
274		}
275	}
276	TRACE_OUT(on_rw_mapper);
277	return (0);
278}
279
/*
 * The default query_destroy function
 */
static void
on_query_destroy(struct query_state *qstate)
{

	TRACE_IN(on_query_destroy);
	/* Release whatever the request/response communication elements
	 * still own. */
	finalize_comm_element(&qstate->response);
	finalize_comm_element(&qstate->request);
	TRACE_OUT(on_query_destroy);
}
292
293/*
294 * The functions below are used to process write requests.
295 * - on_write_request_read1 and on_write_request_read2 read the request itself
296 * - on_write_request_process processes it (if the client requests to
297 *    cache the negative result, the on_negative_write_request_process is used)
298 * - on_write_response_write1 sends the response
299 */
/*
 * First read phase of a write request: receives the three size_t header
 * fields, validates them and allocates the buffers for the second phase.
 * Returns 0 to continue the state machine, -1 on protocol error.
 */
static int
on_write_request_read1(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	ssize_t	result;

	TRACE_IN(on_write_request_read1);
	if (qstate->kevent_watermark == 0)
		/* First pass: wait for the three size_t header fields. */
		qstate->kevent_watermark = sizeof(size_t) * 3;
	else {
		init_comm_element(&qstate->request, CET_WRITE_REQUEST);
		write_request = get_cache_write_request(&qstate->request);

		/* Header: entry name length, cache key size, data size. */
		result = qstate->read_func(qstate, &write_request->entry_length,
	    		sizeof(size_t));
		result += qstate->read_func(qstate,
	    		&write_request->cache_key_size, sizeof(size_t));
		result += qstate->read_func(qstate,
	    		&write_request->data_size, sizeof(size_t));

		if (result != sizeof(size_t) * 3) {
			TRACE_OUT(on_write_request_read1);
			return (-1);
		}

		/* Reject absurd sizes before allocating; a zero data_size
		 * is allowed here - it marks a negative write request. */
		if (BUFSIZE_INVALID(write_request->entry_length) ||
			BUFSIZE_INVALID(write_request->cache_key_size) ||
			(BUFSIZE_INVALID(write_request->data_size) &&
			(write_request->data_size != 0))) {
			TRACE_OUT(on_write_request_read1);
			return (-1);
		}

		/* Entry name buffer, NUL-terminated via the extra byte. */
		write_request->entry = (char *)malloc(
			write_request->entry_length + 1);
		assert(write_request->entry != NULL);
		memset(write_request->entry, 0,
			write_request->entry_length + 1);

		/* The cache key is prefixed with the client's eid string so
		 * per-user caches stay separated; the client's key bytes are
		 * read after the prefix in phase two. */
		write_request->cache_key = (char *)malloc(
			write_request->cache_key_size +
			qstate->eid_str_length);
		assert(write_request->cache_key != NULL);
		memcpy(write_request->cache_key, qstate->eid_str,
			qstate->eid_str_length);
		memset(write_request->cache_key + qstate->eid_str_length, 0,
			write_request->cache_key_size);

		if (write_request->data_size != 0) {
			write_request->data = (char *)malloc(
				write_request->data_size);
			assert(write_request->data != NULL);
			memset(write_request->data, 0,
				write_request->data_size);
		}

		/* Phase two reads the variable-sized payload. */
		qstate->kevent_watermark = write_request->entry_length +
			write_request->cache_key_size +
			write_request->data_size;
		qstate->process_func = on_write_request_read2;
	}

	TRACE_OUT(on_write_request_read1);
	return (0);
}
365
/*
 * Second read phase of a write request: receives the entry name, the
 * cache key and (for positive writes) the data into the buffers that
 * on_write_request_read1 allocated, then selects the processing handler.
 */
static int
on_write_request_read2(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	ssize_t	result;

	TRACE_IN(on_write_request_read2);
	write_request = get_cache_write_request(&qstate->request);

	/* The client's key bytes land right after the eid prefix already
	 * stored at the start of cache_key. */
	result = qstate->read_func(qstate, write_request->entry,
		write_request->entry_length);
	result += qstate->read_func(qstate, write_request->cache_key +
		qstate->eid_str_length, write_request->cache_key_size);
	if (write_request->data_size != 0)
		result += qstate->read_func(qstate, write_request->data,
			write_request->data_size);

	if (result != qstate->kevent_watermark) {
		TRACE_OUT(on_write_request_read2);
		return (-1);
	}
	/* From now on cache_key_size covers the eid prefix as well. */
	write_request->cache_key_size += qstate->eid_str_length;

	qstate->kevent_watermark = 0;
	/* A zero data_size marks a negative (not-found) cache write. */
	if (write_request->data_size != 0)
		qstate->process_func = on_write_request_process;
	else
	    	qstate->process_func = on_negative_write_request_process;
	TRACE_OUT(on_write_request_read2);
	return (0);
}
397
/*
 * Processes a positive write request: locates the configuration entry
 * named in the request, verifies it accepts client writes and stores the
 * data in its positive cache.  Always ends by scheduling the response
 * writer; errors are reported through write_response->error_code.
 */
static	int
on_write_request_process(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	struct cache_write_response	*write_response;
	cache_entry c_entry;

	TRACE_IN(on_write_request_process);
	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
	write_response = get_cache_write_response(&qstate->response);
	write_request = get_cache_write_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, write_request->entry);

	if (qstate->config_entry == NULL) {
		write_response->error_code = ENOENT;

		LOG_ERR_2("write_request", "can't find configuration"
		    " entry '%s'. aborting request", write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		write_response->error_code = EACCES;

		LOG_ERR_2("write_request",
			"configuration entry '%s' is disabled",
			write_request->entry);
		goto fin;
	}

	/* Entries that perform their own lookups are read-only to clients. */
	if (qstate->config_entry->perform_actual_lookups != 0) {
		write_response->error_code = EOPNOTSUPP;

		LOG_ERR_2("write_request",
			"entry '%s' performs lookups by itself: "
			"can't write to it", write_request->entry);
		goto fin;
	}

	/* Resolve the positive cache entry under the configuration read
	 * lock, then do the actual write under the entry lock. */
	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
    		qstate->config_entry->positive_cache_params.entry_name);
	configuration_unlock(s_configuration);
	if (c_entry != NULL) {
		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
		qstate->config_entry->positive_cache_entry = c_entry;
		write_response->error_code = cache_write(c_entry,
			write_request->cache_key,
	    		write_request->cache_key_size,
	    		write_request->data,
			write_request->data_size);
		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);

		/* Apply the per-entry query timeout, when configured. */
		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));

	} else
		write_response->error_code = -1;

fin:
	/* Switch kqueue to write mode and send the response code. */
	qstate->kevent_filter = EVFILT_WRITE;
	qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_write_response_write1;

	TRACE_OUT(on_write_request_process);
	return (0);
}
470
/*
 * Processes a negative write request (data_size == 0): records a
 * "not found" marker for the key in the entry's negative cache.
 * Mirrors on_write_request_process, but writes the fixed negative_data
 * sentinel instead of client-supplied data.
 */
static int
on_negative_write_request_process(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	struct cache_write_response	*write_response;
	cache_entry c_entry;

	TRACE_IN(on_negative_write_request_process);
	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
	write_response = get_cache_write_response(&qstate->response);
	write_request = get_cache_write_request(&qstate->request);

	qstate->config_entry = configuration_find_entry	(
		s_configuration, write_request->entry);

	if (qstate->config_entry == NULL) {
		write_response->error_code = ENOENT;

		LOG_ERR_2("negative_write_request",
			"can't find configuration"
		   	" entry '%s'. aborting request", write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		write_response->error_code = EACCES;

		LOG_ERR_2("negative_write_request",
			"configuration entry '%s' is disabled",
			write_request->entry);
		goto fin;
	}

	/* Entries that perform their own lookups are read-only to clients. */
	if (qstate->config_entry->perform_actual_lookups != 0) {
		write_response->error_code = EOPNOTSUPP;

		LOG_ERR_2("negative_write_request",
			"entry '%s' performs lookups by itself: "
			"can't write to it", write_request->entry);
		goto fin;
	} else {
#ifdef NS_CACHED_EID_CHECKING
		/* Optional re-check of the client's euid/egid. */
		if (check_query_eids(qstate) != 0) {
			write_response->error_code = EPERM;
			goto fin;
		}
#endif
	}

	/* Resolve the negative cache entry under the configuration read
	 * lock, then store the sentinel under the entry lock. */
	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
    		qstate->config_entry->negative_cache_params.entry_name);
	configuration_unlock(s_configuration);
	if (c_entry != NULL) {
		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
		qstate->config_entry->negative_cache_entry = c_entry;
		write_response->error_code = cache_write(c_entry,
			write_request->cache_key,
	    		write_request->cache_key_size,
	    		negative_data,
			sizeof(negative_data));
		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);

		/* Apply the per-entry query timeout, when configured. */
		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));
	} else
		write_response->error_code = -1;

fin:
	/* Switch kqueue to write mode and send the response code. */
	qstate->kevent_filter = EVFILT_WRITE;
	qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_write_response_write1;

	TRACE_OUT(on_negative_write_request_process);
	return (0);
}
550
/*
 * Sends the single-int write response (the error code), then recycles
 * the query state and hands the connection back to on_rw_mapper for the
 * next request.
 */
static int
on_write_response_write1(struct query_state *qstate)
{
	struct cache_write_response	*write_response;
	ssize_t	bytes_written;

	TRACE_IN(on_write_response_write1);
	write_response = get_cache_write_response(&qstate->response);
	bytes_written = qstate->write_func(qstate,
		&write_response->error_code, sizeof(int));
	if (bytes_written != sizeof(int)) {
		TRACE_OUT(on_write_response_write1);
		return (-1);
	}

	/* Response delivered - drop both communication elements. */
	finalize_comm_element(&qstate->request);
	finalize_comm_element(&qstate->response);

	qstate->kevent_watermark = sizeof(int);
	qstate->kevent_filter = EVFILT_READ;
	qstate->process_func = on_rw_mapper;

	TRACE_OUT(on_write_response_write1);
	return (0);
}
576
577/*
578 * The functions below are used to process read requests.
579 * - on_read_request_read1 and on_read_request_read2 read the request itself
580 * - on_read_request_process processes it
581 * - on_read_response_write1 and on_read_response_write2 send the response
582 */
/*
 * First read phase of a read request: receives the two size_t header
 * fields, validates them and allocates the entry-name and cache-key
 * buffers for the second phase.
 */
static int
on_read_request_read1(struct query_state *qstate)
{
	struct cache_read_request *read_request;
	ssize_t	result;

	TRACE_IN(on_read_request_read1);
	if (qstate->kevent_watermark == 0)
		/* First pass: wait for the two size_t header fields. */
		qstate->kevent_watermark = sizeof(size_t) * 2;
	else {
		init_comm_element(&qstate->request, CET_READ_REQUEST);
		read_request = get_cache_read_request(&qstate->request);

		/* Header: entry name length and cache key size. */
		result = qstate->read_func(qstate,
	    		&read_request->entry_length, sizeof(size_t));
		result += qstate->read_func(qstate,
	    		&read_request->cache_key_size, sizeof(size_t));

		if (result != sizeof(size_t) * 2) {
			TRACE_OUT(on_read_request_read1);
			return (-1);
		}

		/* Reject absurd sizes before allocating anything. */
		if (BUFSIZE_INVALID(read_request->entry_length) ||
			BUFSIZE_INVALID(read_request->cache_key_size)) {
			TRACE_OUT(on_read_request_read1);
			return (-1);
		}

		/* Entry name buffer, NUL-terminated via the extra byte. */
		read_request->entry = (char *)malloc(
			read_request->entry_length + 1);
		assert(read_request->entry != NULL);
		memset(read_request->entry, 0, read_request->entry_length + 1);

		/* The cache key is prefixed with the client's eid string so
		 * per-user caches stay separated; the client's key bytes are
		 * read after the prefix in phase two. */
		read_request->cache_key = (char *)malloc(
			read_request->cache_key_size +
			qstate->eid_str_length);
		assert(read_request->cache_key != NULL);
		memcpy(read_request->cache_key, qstate->eid_str,
			qstate->eid_str_length);
		memset(read_request->cache_key + qstate->eid_str_length, 0,
			read_request->cache_key_size);

		/* Phase two reads the variable-sized payload. */
		qstate->kevent_watermark = read_request->entry_length +
			read_request->cache_key_size;
		qstate->process_func = on_read_request_read2;
	}

	TRACE_OUT(on_read_request_read1);
	return (0);
}
634
635static int
636on_read_request_read2(struct query_state *qstate)
637{
638	struct cache_read_request	*read_request;
639	ssize_t	result;
640
641	TRACE_IN(on_read_request_read2);
642	read_request = get_cache_read_request(&qstate->request);
643
644	result = qstate->read_func(qstate, read_request->entry,
645		read_request->entry_length);
646	result += qstate->read_func(qstate,
647		read_request->cache_key + qstate->eid_str_length,
648		read_request->cache_key_size);
649
650	if (result != qstate->kevent_watermark) {
651		TRACE_OUT(on_read_request_read2);
652		return (-1);
653	}
654	read_request->cache_key_size += qstate->eid_str_length;
655
656	qstate->kevent_watermark = 0;
657	qstate->process_func = on_read_request_process;
658
659	TRACE_OUT(on_read_request_read2);
660	return (0);
661}
662
/*
 * Processes a read request: tries the positive cache, then the negative
 * cache, and - for entries configured to perform actual lookups - falls
 * back to the NSS agent, caching whatever that lookup returns.  Errors
 * are reported through read_response->error_code; the function itself
 * always schedules the response writer and returns 0.
 */
static int
on_read_request_process(struct query_state *qstate)
{
	struct cache_read_request *read_request;
	struct cache_read_response *read_response;
	cache_entry	c_entry, neg_c_entry;

	struct agent	*lookup_agent;
	struct common_agent *c_agent;
	int res;

	TRACE_IN(on_read_request_process);
	init_comm_element(&qstate->response, CET_READ_RESPONSE);
	read_response = get_cache_read_response(&qstate->response);
	read_request = get_cache_read_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, read_request->entry);
	if (qstate->config_entry == NULL) {
		read_response->error_code = ENOENT;

		LOG_ERR_2("read_request",
			"can't find configuration "
	    		"entry '%s'. aborting request", read_request->entry);
	    	goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		read_response->error_code = EACCES;

		LOG_ERR_2("read_request",
			"configuration entry '%s' is disabled",
			read_request->entry);
		goto fin;
	}

	/*
	 * if we perform lookups by ourselves, then we don't need to separate
	 * cache entries by euid and egid
	 */
	if (qstate->config_entry->perform_actual_lookups != 0)
		memset(read_request->cache_key, 0, qstate->eid_str_length);
	else {
#ifdef NS_CACHED_EID_CHECKING
		if (check_query_eids(qstate) != 0) {
		/* if the lookup is not self-performing, we check for clients euid/egid */
			read_response->error_code = EPERM;
			goto fin;
		}
#endif
	}

	/* Resolve both cache entries under the configuration read lock. */
	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
    		qstate->config_entry->positive_cache_params.entry_name);
	neg_c_entry = find_cache_entry(s_cache,
		qstate->config_entry->negative_cache_params.entry_name);
	configuration_unlock(s_configuration);
	if ((c_entry != NULL) && (neg_c_entry != NULL)) {
		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
		qstate->config_entry->positive_cache_entry = c_entry;
		/* First cache_read with a NULL buffer only queries the
		 * required data size (-2 means "found, need a buffer"). */
		read_response->error_code = cache_read(c_entry,
	    		read_request->cache_key,
	    		read_request->cache_key_size, NULL,
	    		&read_response->data_size);

		if (read_response->error_code == -2) {
			read_response->data = (char *)malloc(
		    		read_response->data_size);
			/*
			 * Fix: check the freshly-allocated buffer, not the
			 * response structure itself (which is always
			 * non-NULL here) - the original assert never
			 * validated the malloc result.
			 */
			assert(read_response->data != NULL);
			read_response->error_code = cache_read(c_entry,
				read_request->cache_key,
		    		read_request->cache_key_size,
		    		read_response->data,
		    		&read_response->data_size);
		}
		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);

		/* Positive miss (-1): consult the negative cache; a hit
		 * there is reported as success with an empty payload. */
		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
		qstate->config_entry->negative_cache_entry = neg_c_entry;
		if (read_response->error_code == -1) {
			read_response->error_code = cache_read(neg_c_entry,
				read_request->cache_key,
				read_request->cache_key_size, NULL,
				&read_response->data_size);

			if (read_response->error_code == -2) {
				read_response->error_code = 0;
				read_response->data = NULL;
				read_response->data_size = 0;
			}
		}
		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);

		/* Missed in both caches: perform the actual NSS lookup when
		 * this entry is configured to do so, and cache the result. */
		if ((read_response->error_code == -1) &&
			(qstate->config_entry->perform_actual_lookups != 0)) {
			free(read_response->data);
			read_response->data = NULL;
			read_response->data_size = 0;

			lookup_agent = find_agent(s_agent_table,
				read_request->entry, COMMON_AGENT);

			if ((lookup_agent != NULL) &&
			(lookup_agent->type == COMMON_AGENT)) {
				c_agent = (struct common_agent *)lookup_agent;
				/* The agent gets the key without the (zeroed)
				 * eid prefix. */
				res = c_agent->lookup_func(
					read_request->cache_key +
						qstate->eid_str_length,
					read_request->cache_key_size -
						qstate->eid_str_length,
					&read_response->data,
					&read_response->data_size);

				if (res == NS_SUCCESS) {
					read_response->error_code = 0;
					configuration_lock_entry(
						qstate->config_entry,
						CELT_POSITIVE);
					cache_write(c_entry,
						read_request->cache_key,
	    					read_request->cache_key_size,
	    					read_response->data,
						read_response->data_size);
					configuration_unlock_entry(
						qstate->config_entry,
						CELT_POSITIVE);
				} else if ((res == NS_NOTFOUND) ||
					  (res == NS_RETURN)) {
					configuration_lock_entry(
						  qstate->config_entry,
						  CELT_NEGATIVE);
					cache_write(neg_c_entry,
						read_request->cache_key,
						read_request->cache_key_size,
						negative_data,
						sizeof(negative_data));
					configuration_unlock_entry(
						  qstate->config_entry,
						  CELT_NEGATIVE);

					read_response->error_code = 0;
					read_response->data = NULL;
					read_response->data_size = 0;
				}
			}
		}

		/* Apply the per-entry query timeout, when configured. */
		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));
	} else
		read_response->error_code = -1;

fin:
	/* Switch kqueue to write mode; successful responses also carry the
	 * data size, so their first write is one int plus one size_t. */
	qstate->kevent_filter = EVFILT_WRITE;
	if (read_response->error_code == 0)
		qstate->kevent_watermark = sizeof(int) + sizeof(size_t);
	else
		qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_read_response_write1;

	TRACE_OUT(on_read_request_process);
	return (0);
}
830
831static int
832on_read_response_write1(struct query_state *qstate)
833{
834	struct cache_read_response	*read_response;
835	ssize_t	result;
836
837	TRACE_IN(on_read_response_write1);
838	read_response = get_cache_read_response(&qstate->response);
839
840	result = qstate->write_func(qstate, &read_response->error_code,
841		sizeof(int));
842
843	if (read_response->error_code == 0) {
844		result += qstate->write_func(qstate, &read_response->data_size,
845			sizeof(size_t));
846		if (result != qstate->kevent_watermark) {
847			TRACE_OUT(on_read_response_write1);
848			return (-1);
849		}
850
851		qstate->kevent_watermark = read_response->data_size;
852		qstate->process_func = on_read_response_write2;
853	} else {
854		if (result != qstate->kevent_watermark) {
855			TRACE_OUT(on_read_response_write1);
856			return (-1);
857		}
858
859		qstate->kevent_watermark = 0;
860		qstate->process_func = NULL;
861	}
862
863	TRACE_OUT(on_read_response_write1);
864	return (0);
865}
866
/*
 * Second write phase of a read response: sends the payload (when any),
 * then recycles the query state and hands the connection back to
 * on_rw_mapper for the next request.
 */
static int
on_read_response_write2(struct query_state *qstate)
{
	struct cache_read_response	*read_response;
	ssize_t	bytes_written;

	TRACE_IN(on_read_response_write2);
	read_response = get_cache_read_response(&qstate->response);
	if (read_response->data_size > 0) {
		bytes_written = qstate->write_func(qstate,
			read_response->data, read_response->data_size);
		if (bytes_written != qstate->kevent_watermark) {
			TRACE_OUT(on_read_response_write2);
			return (-1);
		}
	}

	/* Response complete - drop both communication elements. */
	finalize_comm_element(&qstate->request);
	finalize_comm_element(&qstate->response);

	qstate->kevent_watermark = sizeof(int);
	qstate->kevent_filter = EVFILT_READ;
	qstate->process_func = on_rw_mapper;
	TRACE_OUT(on_read_response_write2);
	return (0);
}
893
894/*
895 * The functions below are used to process write requests.
896 * - on_transform_request_read1 and on_transform_request_read2 read the
897 *   request itself
898 * - on_transform_request_process processes it
899 * - on_transform_response_write1 sends the response
900 */
/*
 * First read phase of a transform request: receives the entry-name
 * length and the transformation type, validates them, and either
 * allocates the name buffer for phase two or goes straight to
 * processing when no entry name was sent (meaning "all entries").
 */
static int
on_transform_request_read1(struct query_state *qstate)
{
	struct cache_transform_request *transform_request;
	ssize_t	result;

	TRACE_IN(on_transform_request_read1);
	if (qstate->kevent_watermark == 0)
		/* First pass: wait for the fixed-size header. */
		qstate->kevent_watermark = sizeof(size_t) + sizeof(int);
	else {
		init_comm_element(&qstate->request, CET_TRANSFORM_REQUEST);
		transform_request =
			get_cache_transform_request(&qstate->request);

		result = qstate->read_func(qstate,
	    		&transform_request->entry_length, sizeof(size_t));
		result += qstate->read_func(qstate,
	    		&transform_request->transformation_type, sizeof(int));

		if (result != sizeof(size_t) + sizeof(int)) {
			TRACE_OUT(on_transform_request_read1);
			return (-1);
		}

		/* Only per-user (TT_USER) and global (TT_ALL) transforms
		 * are defined. */
		if ((transform_request->transformation_type != TT_USER) &&
		    (transform_request->transformation_type != TT_ALL)) {
			TRACE_OUT(on_transform_request_read1);
			return (-1);
		}

		if (transform_request->entry_length != 0) {
			if (BUFSIZE_INVALID(transform_request->entry_length)) {
				TRACE_OUT(on_transform_request_read1);
				return (-1);
			}

			/* Entry name buffer, NUL-terminated via the extra
			 * byte; phase two fills it in. */
			transform_request->entry = (char *)malloc(
				transform_request->entry_length + 1);
			assert(transform_request->entry != NULL);
			memset(transform_request->entry, 0,
				transform_request->entry_length + 1);

			qstate->process_func = on_transform_request_read2;
		} else
			/* Zero length: no name follows, process directly. */
			qstate->process_func = on_transform_request_process;

		qstate->kevent_watermark = transform_request->entry_length;
	}

	TRACE_OUT(on_transform_request_read1);
	return (0);
}
953
954static int
955on_transform_request_read2(struct query_state *qstate)
956{
957	struct cache_transform_request	*transform_request;
958	ssize_t	result;
959
960	TRACE_IN(on_transform_request_read2);
961	transform_request = get_cache_transform_request(&qstate->request);
962
963	result = qstate->read_func(qstate, transform_request->entry,
964		transform_request->entry_length);
965
966	if (result != qstate->kevent_watermark) {
967		TRACE_OUT(on_transform_request_read2);
968		return (-1);
969	}
970
971	qstate->kevent_watermark = 0;
972	qstate->process_func = on_transform_request_process;
973
974	TRACE_OUT(on_transform_request_read2);
975	return (0);
976}
977
978static int
979on_transform_request_process(struct query_state *qstate)
980{
981	struct cache_transform_request *transform_request;
982	struct cache_transform_response *transform_response;
983	struct configuration_entry *config_entry;
984	size_t	i, size;
985
986	TRACE_IN(on_transform_request_process);
987	init_comm_element(&qstate->response, CET_TRANSFORM_RESPONSE);
988	transform_response = get_cache_transform_response(&qstate->response);
989	transform_request = get_cache_transform_request(&qstate->request);
990
991	switch (transform_request->transformation_type) {
992	case TT_USER:
993		if (transform_request->entry == NULL) {
994			size = configuration_get_entries_size(s_configuration);
995			for (i = 0; i < size; ++i) {
996			    config_entry = configuration_get_entry(
997				s_configuration, i);
998
999			    if (config_entry->perform_actual_lookups == 0)
1000			    	clear_config_entry_part(config_entry,
1001				    qstate->eid_str, qstate->eid_str_length);
1002			}
1003		} else {
1004			qstate->config_entry = configuration_find_entry(
1005				s_configuration, transform_request->entry);
1006
1007			if (qstate->config_entry == NULL) {
1008				LOG_ERR_2("transform_request",
1009					"can't find configuration"
1010		   			" entry '%s'. aborting request",
1011					transform_request->entry);
1012				transform_response->error_code = -1;
1013				goto fin;
1014			}
1015
1016			if (qstate->config_entry->perform_actual_lookups != 0) {
1017				LOG_ERR_2("transform_request",
1018					"can't transform the cache entry %s"
1019					", because it ised for actual lookups",
1020					transform_request->entry);
1021				transform_response->error_code = -1;
1022				goto fin;
1023			}
1024
1025			clear_config_entry_part(qstate->config_entry,
1026				qstate->eid_str, qstate->eid_str_length);
1027		}
1028		break;
1029	case TT_ALL:
1030		if (qstate->euid != 0)
1031			transform_response->error_code = -1;
1032		else {
1033			if (transform_request->entry == NULL) {
1034				size = configuration_get_entries_size(
1035					s_configuration);
1036				for (i = 0; i < size; ++i) {
1037				    clear_config_entry(
1038					configuration_get_entry(
1039						s_configuration, i));
1040				}
1041			} else {
1042				qstate->config_entry = configuration_find_entry(
1043					s_configuration,
1044					transform_request->entry);
1045
1046				if (qstate->config_entry == NULL) {
1047					LOG_ERR_2("transform_request",
1048						"can't find configuration"
1049		   				" entry '%s'. aborting request",
1050						transform_request->entry);
1051					transform_response->error_code = -1;
1052					goto fin;
1053				}
1054
1055				clear_config_entry(qstate->config_entry);
1056			}
1057		}
1058		break;
1059	default:
1060		transform_response->error_code = -1;
1061	}
1062
1063fin:
1064	qstate->kevent_watermark = 0;
1065	qstate->process_func = on_transform_response_write1;
1066	TRACE_OUT(on_transform_request_process);
1067	return (0);
1068}
1069
1070static int
1071on_transform_response_write1(struct query_state *qstate)
1072{
1073	struct cache_transform_response	*transform_response;
1074	ssize_t	result;
1075
1076	TRACE_IN(on_transform_response_write1);
1077	transform_response = get_cache_transform_response(&qstate->response);
1078	result = qstate->write_func(qstate, &transform_response->error_code,
1079		sizeof(int));
1080	if (result != sizeof(int)) {
1081		TRACE_OUT(on_transform_response_write1);
1082		return (-1);
1083	}
1084
1085	finalize_comm_element(&qstate->request);
1086	finalize_comm_element(&qstate->response);
1087
1088	qstate->kevent_watermark = 0;
1089	qstate->process_func = NULL;
1090	TRACE_OUT(on_transform_response_write1);
1091	return (0);
1092}
1093
1094/*
1095 * Checks if the client's euid and egid do not differ from its uid and gid.
1096 * Returns 0 on success.
1097 */
1098int
1099check_query_eids(struct query_state *qstate)
1100{
1101
1102	return ((qstate->uid != qstate->euid) || (qstate->gid != qstate->egid) ? -1 : 0);
1103}
1104
1105/*
1106 * Uses the qstate fields to process an "alternate" read - when the buffer is
1107 * too large to be received during one socket read operation
1108 */
1109ssize_t
1110query_io_buffer_read(struct query_state *qstate, void *buf, size_t nbytes)
1111{
1112	ssize_t	result;
1113
1114	TRACE_IN(query_io_buffer_read);
1115	if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
1116		return (-1);
1117
1118	if (nbytes < qstate->io_buffer + qstate->io_buffer_size -
1119			qstate->io_buffer_p)
1120		result = nbytes;
1121	else
1122		result = qstate->io_buffer + qstate->io_buffer_size -
1123			qstate->io_buffer_p;
1124
1125	memcpy(buf, qstate->io_buffer_p, result);
1126	qstate->io_buffer_p += result;
1127
1128	if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) {
1129		free(qstate->io_buffer);
1130		qstate->io_buffer = NULL;
1131
1132		qstate->write_func = query_socket_write;
1133		qstate->read_func = query_socket_read;
1134	}
1135
1136	TRACE_OUT(query_io_buffer_read);
1137	return (result);
1138}
1139
1140/*
1141 * Uses the qstate fields to process an "alternate" write - when the buffer is
1142 * too large to be sent during one socket write operation
1143 */
1144ssize_t
1145query_io_buffer_write(struct query_state *qstate, const void *buf,
1146	size_t nbytes)
1147{
1148	ssize_t	result;
1149
1150	TRACE_IN(query_io_buffer_write);
1151	if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
1152		return (-1);
1153
1154	if (nbytes < qstate->io_buffer + qstate->io_buffer_size -
1155			qstate->io_buffer_p)
1156		result = nbytes;
1157	else
1158		result = qstate->io_buffer + qstate->io_buffer_size -
1159		qstate->io_buffer_p;
1160
1161	memcpy(qstate->io_buffer_p, buf, result);
1162	qstate->io_buffer_p += result;
1163
1164	if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) {
1165		qstate->use_alternate_io = 1;
1166		qstate->io_buffer_p = qstate->io_buffer;
1167
1168		qstate->write_func = query_socket_write;
1169		qstate->read_func = query_socket_read;
1170	}
1171
1172	TRACE_OUT(query_io_buffer_write);
1173	return (result);
1174}
1175
1176/*
1177 * The default "read" function, which reads data directly from socket
1178 */
1179ssize_t
1180query_socket_read(struct query_state *qstate, void *buf, size_t nbytes)
1181{
1182	ssize_t	result;
1183
1184	TRACE_IN(query_socket_read);
1185	if (qstate->socket_failed != 0) {
1186		TRACE_OUT(query_socket_read);
1187		return (-1);
1188	}
1189
1190	result = read(qstate->sockfd, buf, nbytes);
1191	if ((result == -1) || (result < nbytes))
1192		qstate->socket_failed = 1;
1193
1194	TRACE_OUT(query_socket_read);
1195	return (result);
1196}
1197
1198/*
1199 * The default "write" function, which writes data directly to socket
1200 */
1201ssize_t
1202query_socket_write(struct query_state *qstate, const void *buf, size_t nbytes)
1203{
1204	ssize_t	result;
1205
1206	TRACE_IN(query_socket_write);
1207	if (qstate->socket_failed != 0) {
1208		TRACE_OUT(query_socket_write);
1209		return (-1);
1210	}
1211
1212	result = write(qstate->sockfd, buf, nbytes);
1213	if ((result == -1) || (result < nbytes))
1214		qstate->socket_failed = 1;
1215
1216	TRACE_OUT(query_socket_write);
1217	return (result);
1218}
1219
1220/*
1221 * Initializes the query_state structure by filling it with the default values.
1222 */
1223struct query_state *
1224init_query_state(int sockfd, size_t kevent_watermark, uid_t euid, gid_t egid)
1225{
1226	struct query_state	*retval;
1227
1228	TRACE_IN(init_query_state);
1229	retval = (struct query_state *)malloc(sizeof(struct query_state));
1230	assert(retval != NULL);
1231	memset(retval, 0, sizeof(struct query_state));
1232
1233	retval->sockfd = sockfd;
1234	retval->kevent_filter = EVFILT_READ;
1235	retval->kevent_watermark = kevent_watermark;
1236
1237	retval->euid = euid;
1238	retval->egid = egid;
1239	retval->uid = retval->gid = -1;
1240
1241	if (asprintf(&retval->eid_str, "%d_%d_", retval->euid,
1242		retval->egid) == -1) {
1243		free(retval);
1244		return (NULL);
1245	}
1246	retval->eid_str_length = strlen(retval->eid_str);
1247
1248	init_comm_element(&retval->request, CET_UNDEFINED);
1249	init_comm_element(&retval->response, CET_UNDEFINED);
1250	retval->process_func = on_query_startup;
1251	retval->destroy_func = on_query_destroy;
1252
1253	retval->write_func = query_socket_write;
1254	retval->read_func = query_socket_read;
1255
1256	get_time_func(&retval->creation_time);
1257	memcpy(&retval->timeout, &s_configuration->query_timeout,
1258		sizeof(struct timeval));
1259
1260	TRACE_OUT(init_query_state);
1261	return (retval);
1262}
1263
1264void
1265destroy_query_state(struct query_state *qstate)
1266{
1267
1268	TRACE_IN(destroy_query_state);
1269	if (qstate->eid_str != NULL)
1270	    free(qstate->eid_str);
1271
1272	if (qstate->io_buffer != NULL)
1273		free(qstate->io_buffer);
1274
1275	qstate->destroy_func(qstate);
1276	free(qstate);
1277	TRACE_OUT(destroy_query_state);
1278}
1279