1/*-
2 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/usr.sbin/nscd/query.c 183770 2008-10-12 00:44:27Z delphij $");
30
31#include <sys/types.h>
32#include <sys/socket.h>
33#include <sys/time.h>
34#include <sys/event.h>
35#include <assert.h>
36#include <errno.h>
37#include <nsswitch.h>
38#include <stdio.h>
39#include <stdlib.h>
40#include <string.h>
41#include "config.h"
42#include "debug.h"
43#include "query.h"
44#include "log.h"
45#include "mp_ws_query.h"
46#include "mp_rs_query.h"
47#include "singletons.h"
48
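/*
 * The one-byte payload that is stored in the negative cache to mark a key
 * as "not found".
 */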
49static const char negative_data[1] = { 0 };
50
51extern void get_time_func(struct timeval *);
52
53static void clear_config_entry(struct configuration_entry *);
54static void clear_config_entry_part(struct configuration_entry *,
55 const char *, size_t);
56
57static int on_query_startup(struct query_state *);
58static void on_query_destroy(struct query_state *);
59
60static int on_read_request_read1(struct query_state *);
61static int on_read_request_read2(struct query_state *);
62static int on_read_request_process(struct query_state *);
63static int on_read_response_write1(struct query_state *);
64static int on_read_response_write2(struct query_state *);
65
66static int on_rw_mapper(struct query_state *);
67
68static int on_transform_request_read1(struct query_state *);
69static int on_transform_request_read2(struct query_state *);
70static int on_transform_request_process(struct query_state *);
71static int on_transform_response_write1(struct query_state *);
72
73static int on_write_request_read1(struct query_state *);
74static int on_write_request_read2(struct query_state *);
75static int on_negative_write_request_process(struct query_state *);
76static int on_write_request_process(struct query_state *);
77static int on_write_response_write1(struct query_state *);
78
79/*
 80 * Clears the specified configuration entry (clears the cache for positive
 81 * and negative entries) and also for all multipart entries.
82 */
83static void
84clear_config_entry(struct configuration_entry *config_entry)
85{
86 size_t i;
87
88 TRACE_IN(clear_config_entry);
89 configuration_lock_entry(config_entry, CELT_POSITIVE);
90 if (config_entry->positive_cache_entry != NULL)
91 transform_cache_entry(
92 config_entry->positive_cache_entry,
93 CTT_CLEAR);
94 configuration_unlock_entry(config_entry, CELT_POSITIVE);
95
96 configuration_lock_entry(config_entry, CELT_NEGATIVE);
97 if (config_entry->negative_cache_entry != NULL)
98 transform_cache_entry(
99 config_entry->negative_cache_entry,
100 CTT_CLEAR);
101 configuration_unlock_entry(config_entry, CELT_NEGATIVE);
102
103 configuration_lock_entry(config_entry, CELT_MULTIPART);
104 for (i = 0; i < config_entry->mp_cache_entries_size; ++i)
105 transform_cache_entry(
106 config_entry->mp_cache_entries[i],
107 CTT_CLEAR);
108 configuration_unlock_entry(config_entry, CELT_MULTIPART);
109
110 TRACE_OUT(clear_config_entry);
111}
112
113/*
 114 * Clears the specified configuration entry by deleting only the elements
 115 * that are owned by the user with the specified eid_str.
116 */
117static void
118clear_config_entry_part(struct configuration_entry *config_entry,
119 const char *eid_str, size_t eid_str_length)
120{
121 cache_entry *start, *finish, *mp_entry;
122 TRACE_IN(clear_config_entry_part);
123 configuration_lock_entry(config_entry, CELT_POSITIVE);
124 if (config_entry->positive_cache_entry != NULL)
125 transform_cache_entry_part(
126 config_entry->positive_cache_entry,
127 CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
128 configuration_unlock_entry(config_entry, CELT_POSITIVE);
129
130 configuration_lock_entry(config_entry, CELT_NEGATIVE);
131 if (config_entry->negative_cache_entry != NULL)
132 transform_cache_entry_part(
133 config_entry->negative_cache_entry,
134 CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
135 configuration_unlock_entry(config_entry, CELT_NEGATIVE);
136
137 configuration_lock_entry(config_entry, CELT_MULTIPART);
138 if (configuration_entry_find_mp_cache_entries(config_entry,
139 eid_str, &start, &finish) == 0) {
140 for (mp_entry = start; mp_entry != finish; ++mp_entry)
141 transform_cache_entry(*mp_entry, CTT_CLEAR);
142 }
143 configuration_unlock_entry(config_entry, CELT_MULTIPART);
144
145 TRACE_OUT(clear_config_entry_part);
146}
147
148/*
 149 * This function is assigned to the query_state structure on its creation.
 150 * Its main purpose is to receive credentials from the client.
151 */
152static int
153on_query_startup(struct query_state *qstate)
154{
155 struct msghdr cred_hdr;
156 struct iovec iov;
157 struct cmsgcred *cred;
158 int elem_type;
159
160 struct {
161 struct cmsghdr hdr;
162 char cred[CMSG_SPACE(sizeof(struct cmsgcred))];
163 } cmsg;
164
165 TRACE_IN(on_query_startup);
166 assert(qstate != NULL);
167
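	/*
	 * Receive the type of the first request together with the client's
	 * credentials, which are passed as an SCM_CREDS control message.
	 */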
168 memset(&cred_hdr, 0, sizeof(struct msghdr));
169 cred_hdr.msg_iov = &iov;
170 cred_hdr.msg_iovlen = 1;
171 cred_hdr.msg_control = (caddr_t)&cmsg;
172 cred_hdr.msg_controllen = CMSG_LEN(sizeof(struct cmsgcred));
173
174 memset(&iov, 0, sizeof(struct iovec));
175 iov.iov_base = &elem_type;
176 iov.iov_len = sizeof(int);
177
178 if (recvmsg(qstate->sockfd, &cred_hdr, 0) == -1) {
179 TRACE_OUT(on_query_startup);
180 return (-1);
181 }
182
183 if (cmsg.hdr.cmsg_len < CMSG_LEN(sizeof(struct cmsgcred))
184 || cmsg.hdr.cmsg_level != SOL_SOCKET
185 || cmsg.hdr.cmsg_type != SCM_CREDS) {
186 TRACE_OUT(on_query_startup);
187 return (-1);
188 }
189
190 cred = (struct cmsgcred *)CMSG_DATA(&cmsg);
191 qstate->uid = cred->cmcred_uid;
192 qstate->gid = cred->cmcred_gid;
193
194#if defined(NS_NSCD_EID_CHECKING) || defined(NS_STRICT_NSCD_EID_CHECKING)
195/*
196 * This check is probably a bit redundant - per-user cache is always separated
197 * by the euid/egid pair
198 */
199 if (check_query_eids(qstate) != 0) {
200#ifdef NS_STRICT_NSCD_EID_CHECKING
201 TRACE_OUT(on_query_startup);
202 return (-1);
203#else
204 if ((elem_type != CET_READ_REQUEST) &&
205 (elem_type != CET_MP_READ_SESSION_REQUEST) &&
206 (elem_type != CET_WRITE_REQUEST) &&
207 (elem_type != CET_MP_WRITE_SESSION_REQUEST)) {
208 TRACE_OUT(on_query_startup);
209 return (-1);
210 }
211#endif
212 }
213#endif
214
215 switch (elem_type) {
216 case CET_WRITE_REQUEST:
217 qstate->process_func = on_write_request_read1;
218 break;
219 case CET_READ_REQUEST:
220 qstate->process_func = on_read_request_read1;
221 break;
222 case CET_TRANSFORM_REQUEST:
223 qstate->process_func = on_transform_request_read1;
224 break;
225 case CET_MP_WRITE_SESSION_REQUEST:
226 qstate->process_func = on_mp_write_session_request_read1;
227 break;
228 case CET_MP_READ_SESSION_REQUEST:
229 qstate->process_func = on_mp_read_session_request_read1;
230 break;
231 default:
232 TRACE_OUT(on_query_startup);
233 return (-1);
234 }
235
236 qstate->kevent_watermark = 0;
237 TRACE_OUT(on_query_startup);
238 return (0);
239}
240
241/*
242 * on_rw_mapper is used to process multiple read/write requests during
 243 * one connection session. It is never used as the initial handler (on
 244 * query_state creation), as it does not process multipart requests and does
 245 * not receive credentials.
246 */
247static int
248on_rw_mapper(struct query_state *qstate)
249{
250 ssize_t result;
251 int elem_type;
252
253 TRACE_IN(on_rw_mapper);
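	/*
	 * On the first pass only the kevent watermark is armed; on the next
	 * pass the request type is read and the matching handler is assigned.
	 */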
254 if (qstate->kevent_watermark == 0) {
255 qstate->kevent_watermark = sizeof(int);
256 } else {
257 result = qstate->read_func(qstate, &elem_type, sizeof(int));
258 if (result != sizeof(int)) {
259 TRACE_OUT(on_rw_mapper);
260 return (-1);
261 }
262
263 switch (elem_type) {
264 case CET_WRITE_REQUEST:
265 qstate->kevent_watermark = sizeof(size_t);
266 qstate->process_func = on_write_request_read1;
267 break;
268 case CET_READ_REQUEST:
269 qstate->kevent_watermark = sizeof(size_t);
270 qstate->process_func = on_read_request_read1;
271 break;
272 default:
273 TRACE_OUT(on_rw_mapper);
274 return (-1);
275 break;
276 }
277 }
278 TRACE_OUT(on_rw_mapper);
279 return (0);
280}
281
282/*
283 * The default query_destroy function
284 */
285static void
286on_query_destroy(struct query_state *qstate)
287{
288
289 TRACE_IN(on_query_destroy);
290 finalize_comm_element(&qstate->response);
291 finalize_comm_element(&qstate->request);
292 TRACE_OUT(on_query_destroy);
293}
294
295/*
296 * The functions below are used to process write requests.
297 * - on_write_request_read1 and on_write_request_read2 read the request itself
298 * - on_write_request_process processes it (if the client requests to
 299 * cache a negative result, on_negative_write_request_process is used)
300 * - on_write_response_write1 sends the response
301 */
302static int
303on_write_request_read1(struct query_state *qstate)
304{
305 struct cache_write_request *write_request;
306 ssize_t result;
307
308 TRACE_IN(on_write_request_read1);
309 if (qstate->kevent_watermark == 0)
310 qstate->kevent_watermark = sizeof(size_t) * 3;
311 else {
312 init_comm_element(&qstate->request, CET_WRITE_REQUEST);
313 write_request = get_cache_write_request(&qstate->request);
314
315 result = qstate->read_func(qstate, &write_request->entry_length,
316 sizeof(size_t));
317 result += qstate->read_func(qstate,
318 &write_request->cache_key_size, sizeof(size_t));
319 result += qstate->read_func(qstate,
320 &write_request->data_size, sizeof(size_t));
321
322 if (result != sizeof(size_t) * 3) {
323 TRACE_OUT(on_write_request_read1);
324 return (-1);
325 }
326
327 if (BUFSIZE_INVALID(write_request->entry_length) ||
328 BUFSIZE_INVALID(write_request->cache_key_size) ||
329 (BUFSIZE_INVALID(write_request->data_size) &&
330 (write_request->data_size != 0))) {
331 TRACE_OUT(on_write_request_read1);
332 return (-1);
333 }
334
335 write_request->entry = (char *)calloc(1,
336 write_request->entry_length + 1);
337 assert(write_request->entry != NULL);
338
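		/*
		 * The cache key is prefixed with the client's eid string, so
		 * entries of different users are kept separate.
		 */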
339 write_request->cache_key = (char *)calloc(1,
340 write_request->cache_key_size +
341 qstate->eid_str_length);
342 assert(write_request->cache_key != NULL);
343 memcpy(write_request->cache_key, qstate->eid_str,
344 qstate->eid_str_length);
345
346 if (write_request->data_size != 0) {
347 write_request->data = (char *)calloc(1,
348 write_request->data_size);
349 assert(write_request->data != NULL);
350 }
351
352 qstate->kevent_watermark = write_request->entry_length +
353 write_request->cache_key_size +
354 write_request->data_size;
355 qstate->process_func = on_write_request_read2;
356 }
357
358 TRACE_OUT(on_write_request_read1);
359 return (0);
360}
361
362static int
363on_write_request_read2(struct query_state *qstate)
364{
365 struct cache_write_request *write_request;
366 ssize_t result;
367
368 TRACE_IN(on_write_request_read2);
369 write_request = get_cache_write_request(&qstate->request);
370
371 result = qstate->read_func(qstate, write_request->entry,
372 write_request->entry_length);
373 result += qstate->read_func(qstate, write_request->cache_key +
374 qstate->eid_str_length, write_request->cache_key_size);
375 if (write_request->data_size != 0)
376 result += qstate->read_func(qstate, write_request->data,
377 write_request->data_size);
378
379 if (result != qstate->kevent_watermark) {
380 TRACE_OUT(on_write_request_read2);
381 return (-1);
382 }
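	/* Account for the eid prefix that was prepended to the cache key. */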
383 write_request->cache_key_size += qstate->eid_str_length;
384
385 qstate->kevent_watermark = 0;
386 if (write_request->data_size != 0)
387 qstate->process_func = on_write_request_process;
388 else
389 qstate->process_func = on_negative_write_request_process;
390 TRACE_OUT(on_write_request_read2);
391 return (0);
392}
393
394static int
395on_write_request_process(struct query_state *qstate)
396{
397 struct cache_write_request *write_request;
398 struct cache_write_response *write_response;
399 cache_entry c_entry;
400
401 TRACE_IN(on_write_request_process);
402 init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
403 write_response = get_cache_write_response(&qstate->response);
404 write_request = get_cache_write_request(&qstate->request);
405
406 qstate->config_entry = configuration_find_entry(
407 s_configuration, write_request->entry);
408
409 if (qstate->config_entry == NULL) {
410 write_response->error_code = ENOENT;
411
412 LOG_ERR_2("write_request", "can't find configuration"
413 " entry '%s'. aborting request", write_request->entry);
414 goto fin;
415 }
416
417 if (qstate->config_entry->enabled == 0) {
418 write_response->error_code = EACCES;
419
420 LOG_ERR_2("write_request",
421 "configuration entry '%s' is disabled",
422 write_request->entry);
423 goto fin;
424 }
425
426 if (qstate->config_entry->perform_actual_lookups != 0) {
427 write_response->error_code = EOPNOTSUPP;
428
429 LOG_ERR_2("write_request",
430 "entry '%s' performs lookups by itself: "
431 "can't write to it", write_request->entry);
432 goto fin;
433 }
434
435 configuration_lock_rdlock(s_configuration);
436 c_entry = find_cache_entry(s_cache,
437 qstate->config_entry->positive_cache_params.entry_name);
438 configuration_unlock(s_configuration);
439 if (c_entry != NULL) {
440 configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
441 qstate->config_entry->positive_cache_entry = c_entry;
442 write_response->error_code = cache_write(c_entry,
443 write_request->cache_key,
444 write_request->cache_key_size,
445 write_request->data,
446 write_request->data_size);
447 configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
448
449 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
450 (qstate->config_entry->common_query_timeout.tv_usec != 0))
451 memcpy(&qstate->timeout,
452 &qstate->config_entry->common_query_timeout,
453 sizeof(struct timeval));
454
455 } else
456 write_response->error_code = -1;
457
458fin:
459 qstate->kevent_filter = EVFILT_WRITE;
460 qstate->kevent_watermark = sizeof(int);
461 qstate->process_func = on_write_response_write1;
462
463 TRACE_OUT(on_write_request_process);
464 return (0);
465}
466
467static int
468on_negative_write_request_process(struct query_state *qstate)
469{
470 struct cache_write_request *write_request;
471 struct cache_write_response *write_response;
472 cache_entry c_entry;
473
474 TRACE_IN(on_negative_write_request_process);
475 init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
476 write_response = get_cache_write_response(&qstate->response);
477 write_request = get_cache_write_request(&qstate->request);
478
479 qstate->config_entry = configuration_find_entry (
480 s_configuration, write_request->entry);
481
482 if (qstate->config_entry == NULL) {
483 write_response->error_code = ENOENT;
484
485 LOG_ERR_2("negative_write_request",
486 "can't find configuration"
487 " entry '%s'. aborting request", write_request->entry);
488 goto fin;
489 }
490
491 if (qstate->config_entry->enabled == 0) {
492 write_response->error_code = EACCES;
493
494 LOG_ERR_2("negative_write_request",
495 "configuration entry '%s' is disabled",
496 write_request->entry);
497 goto fin;
498 }
499
500 if (qstate->config_entry->perform_actual_lookups != 0) {
501 write_response->error_code = EOPNOTSUPP;
502
503 LOG_ERR_2("negative_write_request",
504 "entry '%s' performs lookups by itself: "
505 "can't write to it", write_request->entry);
506 goto fin;
507 } else {
508#ifdef NS_NSCD_EID_CHECKING
509 if (check_query_eids(qstate) != 0) {
510 write_response->error_code = EPERM;
511 goto fin;
512 }
513#endif
514 }
515
516 configuration_lock_rdlock(s_configuration);
517 c_entry = find_cache_entry(s_cache,
518 qstate->config_entry->negative_cache_params.entry_name);
519 configuration_unlock(s_configuration);
520 if (c_entry != NULL) {
521 configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
522 qstate->config_entry->negative_cache_entry = c_entry;
523 write_response->error_code = cache_write(c_entry,
524 write_request->cache_key,
525 write_request->cache_key_size,
526 negative_data,
527 sizeof(negative_data));
528 configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
529
530 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
531 (qstate->config_entry->common_query_timeout.tv_usec != 0))
532 memcpy(&qstate->timeout,
533 &qstate->config_entry->common_query_timeout,
534 sizeof(struct timeval));
535 } else
536 write_response->error_code = -1;
537
538fin:
539 qstate->kevent_filter = EVFILT_WRITE;
540 qstate->kevent_watermark = sizeof(int);
541 qstate->process_func = on_write_response_write1;
542
543 TRACE_OUT(on_negative_write_request_process);
544 return (0);
545}
546
547static int
548on_write_response_write1(struct query_state *qstate)
549{
550 struct cache_write_response *write_response;
551 ssize_t result;
552
553 TRACE_IN(on_write_response_write1);
554 write_response = get_cache_write_response(&qstate->response);
555 result = qstate->write_func(qstate, &write_response->error_code,
556 sizeof(int));
557 if (result != sizeof(int)) {
558 TRACE_OUT(on_write_response_write1);
559 return (-1);
560 }
561
562 finalize_comm_element(&qstate->request);
563 finalize_comm_element(&qstate->response);
564
565 qstate->kevent_watermark = sizeof(int);
566 qstate->kevent_filter = EVFILT_READ;
567 qstate->process_func = on_rw_mapper;
568
569 TRACE_OUT(on_write_response_write1);
570 return (0);
571}
572
573/*
574 * The functions below are used to process read requests.
575 * - on_read_request_read1 and on_read_request_read2 read the request itself
576 * - on_read_request_process processes it
577 * - on_read_response_write1 and on_read_response_write2 send the response
578 */
579static int
580on_read_request_read1(struct query_state *qstate)
581{
582 struct cache_read_request *read_request;
583 ssize_t result;
584
585 TRACE_IN(on_read_request_read1);
586 if (qstate->kevent_watermark == 0)
587 qstate->kevent_watermark = sizeof(size_t) * 2;
588 else {
589 init_comm_element(&qstate->request, CET_READ_REQUEST);
590 read_request = get_cache_read_request(&qstate->request);
591
592 result = qstate->read_func(qstate,
593 &read_request->entry_length, sizeof(size_t));
594 result += qstate->read_func(qstate,
595 &read_request->cache_key_size, sizeof(size_t));
596
597 if (result != sizeof(size_t) * 2) {
598 TRACE_OUT(on_read_request_read1);
599 return (-1);
600 }
601
602 if (BUFSIZE_INVALID(read_request->entry_length) ||
603 BUFSIZE_INVALID(read_request->cache_key_size)) {
604 TRACE_OUT(on_read_request_read1);
605 return (-1);
606 }
607
608 read_request->entry = (char *)calloc(1,
609 read_request->entry_length + 1);
610 assert(read_request->entry != NULL);
611
612 read_request->cache_key = (char *)calloc(1,
613 read_request->cache_key_size +
614 qstate->eid_str_length);
615 assert(read_request->cache_key != NULL);
616 memcpy(read_request->cache_key, qstate->eid_str,
617 qstate->eid_str_length);
618
619 qstate->kevent_watermark = read_request->entry_length +
620 read_request->cache_key_size;
621 qstate->process_func = on_read_request_read2;
622 }
623
624 TRACE_OUT(on_read_request_read1);
625 return (0);
626}
627
628static int
629on_read_request_read2(struct query_state *qstate)
630{
631 struct cache_read_request *read_request;
632 ssize_t result;
633
634 TRACE_IN(on_read_request_read2);
635 read_request = get_cache_read_request(&qstate->request);
636
637 result = qstate->read_func(qstate, read_request->entry,
638 read_request->entry_length);
639 result += qstate->read_func(qstate,
640 read_request->cache_key + qstate->eid_str_length,
641 read_request->cache_key_size);
642
643 if (result != qstate->kevent_watermark) {
644 TRACE_OUT(on_read_request_read2);
645 return (-1);
646 }
647 read_request->cache_key_size += qstate->eid_str_length;
648
649 qstate->kevent_watermark = 0;
650 qstate->process_func = on_read_request_process;
651
652 TRACE_OUT(on_read_request_read2);
653 return (0);
654}
655
656static int
657on_read_request_process(struct query_state *qstate)
658{
659 struct cache_read_request *read_request;
660 struct cache_read_response *read_response;
661 cache_entry c_entry, neg_c_entry;
662
663 struct agent *lookup_agent;
664 struct common_agent *c_agent;
665 int res;
666
667 TRACE_IN(on_read_request_process);
668 init_comm_element(&qstate->response, CET_READ_RESPONSE);
669 read_response = get_cache_read_response(&qstate->response);
670 read_request = get_cache_read_request(&qstate->request);
671
672 qstate->config_entry = configuration_find_entry(
673 s_configuration, read_request->entry);
674 if (qstate->config_entry == NULL) {
675 read_response->error_code = ENOENT;
676
677 LOG_ERR_2("read_request",
678 "can't find configuration "
679 "entry '%s'. aborting request", read_request->entry);
680 goto fin;
681 }
682
683 if (qstate->config_entry->enabled == 0) {
684 read_response->error_code = EACCES;
685
686 LOG_ERR_2("read_request",
687 "configuration entry '%s' is disabled",
688 read_request->entry);
689 goto fin;
690 }
691
692 /*
 693 * If we perform lookups ourselves, then we don't need to separate
 694 * cache entries by euid and egid.
695 */
696 if (qstate->config_entry->perform_actual_lookups != 0)
697 memset(read_request->cache_key, 0, qstate->eid_str_length);
698 else {
699#ifdef NS_NSCD_EID_CHECKING
700 if (check_query_eids(qstate) != 0) {
 701 /* if the lookup is not self-performing, check the client's euid/egid */
702 read_response->error_code = EPERM;
703 goto fin;
704 }
705#endif
706 }
707
708 configuration_lock_rdlock(s_configuration);
709 c_entry = find_cache_entry(s_cache,
710 qstate->config_entry->positive_cache_params.entry_name);
711 neg_c_entry = find_cache_entry(s_cache,
712 qstate->config_entry->negative_cache_params.entry_name);
713 configuration_unlock(s_configuration);
714 if ((c_entry != NULL) && (neg_c_entry != NULL)) {
715 configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
716 qstate->config_entry->positive_cache_entry = c_entry;
717 read_response->error_code = cache_read(c_entry,
718 read_request->cache_key,
719 read_request->cache_key_size, NULL,
720 &read_response->data_size);
721
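		/*
		 * A result of -2 means the supplied buffer was too small;
		 * data_size now holds the required size, so allocate it and
		 * retry the read.
		 */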
722 if (read_response->error_code == -2) {
723 read_response->data = (char *)malloc(
724 read_response->data_size);
 725 assert(read_response->data != NULL);
726 read_response->error_code = cache_read(c_entry,
727 read_request->cache_key,
728 read_request->cache_key_size,
729 read_response->data,
730 &read_response->data_size);
731 }
732 configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
733
734 configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
735 qstate->config_entry->negative_cache_entry = neg_c_entry;
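		/*
		 * -1 means the key was not found in the positive cache; check
		 * whether a negative result has been cached for it.
		 */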
736 if (read_response->error_code == -1) {
737 read_response->error_code = cache_read(neg_c_entry,
738 read_request->cache_key,
739 read_request->cache_key_size, NULL,
740 &read_response->data_size);
741
742 if (read_response->error_code == -2) {
743 read_response->error_code = 0;
744 read_response->data = NULL;
745 read_response->data_size = 0;
746 }
747 }
748 configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
749
750 if ((read_response->error_code == -1) &&
751 (qstate->config_entry->perform_actual_lookups != 0)) {
752 free(read_response->data);
753 read_response->data = NULL;
754 read_response->data_size = 0;
755
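			/*
			 * Nothing is cached and this entry performs lookups
			 * itself: query the nsswitch agent directly and cache
			 * the (possibly negative) result.
			 */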
756 lookup_agent = find_agent(s_agent_table,
757 read_request->entry, COMMON_AGENT);
758
759 if ((lookup_agent != NULL) &&
760 (lookup_agent->type == COMMON_AGENT)) {
761 c_agent = (struct common_agent *)lookup_agent;
762 res = c_agent->lookup_func(
763 read_request->cache_key +
764 qstate->eid_str_length,
765 read_request->cache_key_size -
766 qstate->eid_str_length,
767 &read_response->data,
768 &read_response->data_size);
769
770 if (res == NS_SUCCESS) {
771 read_response->error_code = 0;
772 configuration_lock_entry(
773 qstate->config_entry,
774 CELT_POSITIVE);
775 cache_write(c_entry,
776 read_request->cache_key,
777 read_request->cache_key_size,
778 read_response->data,
779 read_response->data_size);
780 configuration_unlock_entry(
781 qstate->config_entry,
782 CELT_POSITIVE);
783 } else if ((res == NS_NOTFOUND) ||
784 (res == NS_RETURN)) {
785 configuration_lock_entry(
786 qstate->config_entry,
787 CELT_NEGATIVE);
788 cache_write(neg_c_entry,
789 read_request->cache_key,
790 read_request->cache_key_size,
791 negative_data,
792 sizeof(negative_data));
793 configuration_unlock_entry(
794 qstate->config_entry,
795 CELT_NEGATIVE);
796
797 read_response->error_code = 0;
798 read_response->data = NULL;
799 read_response->data_size = 0;
800 }
801 }
802 }
803
804 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
805 (qstate->config_entry->common_query_timeout.tv_usec != 0))
806 memcpy(&qstate->timeout,
807 &qstate->config_entry->common_query_timeout,
808 sizeof(struct timeval));
809 } else
810 read_response->error_code = -1;
811
812fin:
813 qstate->kevent_filter = EVFILT_WRITE;
814 if (read_response->error_code == 0)
815 qstate->kevent_watermark = sizeof(int) + sizeof(size_t);
816 else
817 qstate->kevent_watermark = sizeof(int);
818 qstate->process_func = on_read_response_write1;
819
820 TRACE_OUT(on_read_request_process);
821 return (0);
822}
823
824static int
825on_read_response_write1(struct query_state *qstate)
826{
827 struct cache_read_response *read_response;
828 ssize_t result;
829
830 TRACE_IN(on_read_response_write1);
831 read_response = get_cache_read_response(&qstate->response);
832
833 result = qstate->write_func(qstate, &read_response->error_code,
834 sizeof(int));
835
836 if (read_response->error_code == 0) {
837 result += qstate->write_func(qstate, &read_response->data_size,
838 sizeof(size_t));
839 if (result != qstate->kevent_watermark) {
840 TRACE_OUT(on_read_response_write1);
841 return (-1);
842 }
843
844 qstate->kevent_watermark = read_response->data_size;
845 qstate->process_func = on_read_response_write2;
846 } else {
847 if (result != qstate->kevent_watermark) {
848 TRACE_OUT(on_read_response_write1);
849 return (-1);
850 }
851
852 qstate->kevent_watermark = 0;
853 qstate->process_func = NULL;
854 }
855
856 TRACE_OUT(on_read_response_write1);
857 return (0);
858}
859
860static int
861on_read_response_write2(struct query_state *qstate)
862{
863 struct cache_read_response *read_response;
864 ssize_t result;
865
866 TRACE_IN(on_read_response_write2);
867 read_response = get_cache_read_response(&qstate->response);
868 if (read_response->data_size > 0) {
869 result = qstate->write_func(qstate, read_response->data,
870 read_response->data_size);
871 if (result != qstate->kevent_watermark) {
872 TRACE_OUT(on_read_response_write2);
873 return (-1);
874 }
875 }
876
877 finalize_comm_element(&qstate->request);
878 finalize_comm_element(&qstate->response);
879
880 qstate->kevent_watermark = sizeof(int);
881 qstate->kevent_filter = EVFILT_READ;
882 qstate->process_func = on_rw_mapper;
883 TRACE_OUT(on_read_response_write2);
884 return (0);
885}
886
887/*
 888 * The functions below are used to process transform requests.
889 * - on_transform_request_read1 and on_transform_request_read2 read the
890 * request itself
891 * - on_transform_request_process processes it
892 * - on_transform_response_write1 sends the response
893 */
894static int
895on_transform_request_read1(struct query_state *qstate)
896{
897 struct cache_transform_request *transform_request;
898 ssize_t result;
899
900 TRACE_IN(on_transform_request_read1);
901 if (qstate->kevent_watermark == 0)
902 qstate->kevent_watermark = sizeof(size_t) + sizeof(int);
903 else {
904 init_comm_element(&qstate->request, CET_TRANSFORM_REQUEST);
905 transform_request =
906 get_cache_transform_request(&qstate->request);
907
908 result = qstate->read_func(qstate,
909 &transform_request->entry_length, sizeof(size_t));
910 result += qstate->read_func(qstate,
911 &transform_request->transformation_type, sizeof(int));
912
913 if (result != sizeof(size_t) + sizeof(int)) {
914 TRACE_OUT(on_transform_request_read1);
915 return (-1);
916 }
917
918 if ((transform_request->transformation_type != TT_USER) &&
919 (transform_request->transformation_type != TT_ALL)) {
920 TRACE_OUT(on_transform_request_read1);
921 return (-1);
922 }
923
924 if (transform_request->entry_length != 0) {
925 if (BUFSIZE_INVALID(transform_request->entry_length)) {
926 TRACE_OUT(on_transform_request_read1);
927 return (-1);
928 }
929
930 transform_request->entry = (char *)calloc(1,
931 transform_request->entry_length + 1);
932 assert(transform_request->entry != NULL);
933
934 qstate->process_func = on_transform_request_read2;
935 } else
936 qstate->process_func = on_transform_request_process;
937
938 qstate->kevent_watermark = transform_request->entry_length;
939 }
940
941 TRACE_OUT(on_transform_request_read1);
942 return (0);
943}
944
945static int
946on_transform_request_read2(struct query_state *qstate)
947{
948 struct cache_transform_request *transform_request;
949 ssize_t result;
950
951 TRACE_IN(on_transform_request_read2);
952 transform_request = get_cache_transform_request(&qstate->request);
953
954 result = qstate->read_func(qstate, transform_request->entry,
955 transform_request->entry_length);
956
957 if (result != qstate->kevent_watermark) {
958 TRACE_OUT(on_transform_request_read2);
959 return (-1);
960 }
961
962 qstate->kevent_watermark = 0;
963 qstate->process_func = on_transform_request_process;
964
965 TRACE_OUT(on_transform_request_read2);
966 return (0);
967}
968
969static int
970on_transform_request_process(struct query_state *qstate)
971{
972 struct cache_transform_request *transform_request;
973 struct cache_transform_response *transform_response;
974 struct configuration_entry *config_entry;
975 size_t i, size;
976
977 TRACE_IN(on_transform_request_process);
978 init_comm_element(&qstate->response, CET_TRANSFORM_RESPONSE);
979 transform_response = get_cache_transform_response(&qstate->response);
980 transform_request = get_cache_transform_request(&qstate->request);
981
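	/*
	 * TT_USER flushes only the entries owned by the calling user (selected
	 * by its eid prefix); TT_ALL flushes entire cache entries and is
	 * permitted only when the client's euid is 0.
	 */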
982 switch (transform_request->transformation_type) {
983 case TT_USER:
984 if (transform_request->entry == NULL) {
985 size = configuration_get_entries_size(s_configuration);
986 for (i = 0; i < size; ++i) {
987 config_entry = configuration_get_entry(
988 s_configuration, i);
989
990 if (config_entry->perform_actual_lookups == 0)
991 clear_config_entry_part(config_entry,
992 qstate->eid_str, qstate->eid_str_length);
993 }
994 } else {
995 qstate->config_entry = configuration_find_entry(
996 s_configuration, transform_request->entry);
997
998 if (qstate->config_entry == NULL) {
999 LOG_ERR_2("transform_request",
1000 "can't find configuration"
1001 " entry '%s'. aborting request",
1002 transform_request->entry);
1003 transform_response->error_code = -1;
1004 goto fin;
1005 }
1006
1007 if (qstate->config_entry->perform_actual_lookups != 0) {
1008 LOG_ERR_2("transform_request",
1009 "can't transform the cache entry %s"
 1010 ", because it is used for actual lookups",
1011 transform_request->entry);
1012 transform_response->error_code = -1;
1013 goto fin;
1014 }
1015
1016 clear_config_entry_part(qstate->config_entry,
1017 qstate->eid_str, qstate->eid_str_length);
1018 }
1019 break;
1020 case TT_ALL:
1021 if (qstate->euid != 0)
1022 transform_response->error_code = -1;
1023 else {
1024 if (transform_request->entry == NULL) {
1025 size = configuration_get_entries_size(
1026 s_configuration);
1027 for (i = 0; i < size; ++i) {
1028 clear_config_entry(
1029 configuration_get_entry(
1030 s_configuration, i));
1031 }
1032 } else {
1033 qstate->config_entry = configuration_find_entry(
1034 s_configuration,
1035 transform_request->entry);
1036
1037 if (qstate->config_entry == NULL) {
1038 LOG_ERR_2("transform_request",
1039 "can't find configuration"
1040 " entry '%s'. aborting request",
1041 transform_request->entry);
1042 transform_response->error_code = -1;
1043 goto fin;
1044 }
1045
1046 clear_config_entry(qstate->config_entry);
1047 }
1048 }
1049 break;
1050 default:
1051 transform_response->error_code = -1;
1052 }
1053
1054fin:
1055 qstate->kevent_watermark = 0;
1056 qstate->process_func = on_transform_response_write1;
1057 TRACE_OUT(on_transform_request_process);
1058 return (0);
1059}
1060
1061static int
1062on_transform_response_write1(struct query_state *qstate)
1063{
1064 struct cache_transform_response *transform_response;
1065 ssize_t result;
1066
1067 TRACE_IN(on_transform_response_write1);
1068 transform_response = get_cache_transform_response(&qstate->response);
1069 result = qstate->write_func(qstate, &transform_response->error_code,
1070 sizeof(int));
1071 if (result != sizeof(int)) {
1072 TRACE_OUT(on_transform_response_write1);
1073 return (-1);
1074 }
1075
1076 finalize_comm_element(&qstate->request);
1077 finalize_comm_element(&qstate->response);
1078
1079 qstate->kevent_watermark = 0;
1080 qstate->process_func = NULL;
1081 TRACE_OUT(on_transform_response_write1);
1082 return (0);
1083}
1084
1085/*
 1086 * Checks that the client's euid and egid match its uid and gid.
1087 * Returns 0 on success.
1088 */
1089int
1090check_query_eids(struct query_state *qstate)
1091{
1092
1093 return ((qstate->uid != qstate->euid) || (qstate->gid != qstate->egid) ? -1 : 0);
1094}
1095
1096/*
1097 * Uses the qstate fields to process an "alternate" read - when the buffer is
1098 * too large to be received during one socket read operation
1099 */
1100ssize_t
1101query_io_buffer_read(struct query_state *qstate, void *buf, size_t nbytes)
1102{
1103 ssize_t result;
1104
1105 TRACE_IN(query_io_buffer_read);
1106 if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
1107 return (-1);
1108
1109 if (nbytes < qstate->io_buffer + qstate->io_buffer_size -
1110 qstate->io_buffer_p)
1111 result = nbytes;
1112 else
1113 result = qstate->io_buffer + qstate->io_buffer_size -
1114 qstate->io_buffer_p;
1115
1116 memcpy(buf, qstate->io_buffer_p, result);
1117 qstate->io_buffer_p += result;
1118
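	/*
	 * Once the buffer has been completely consumed, release it and switch
	 * back to direct socket I/O.
	 */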
1119 if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) {
1120 free(qstate->io_buffer);
1121 qstate->io_buffer = NULL;
1122
1123 qstate->write_func = query_socket_write;
1124 qstate->read_func = query_socket_read;
1125 }
1126
1127 TRACE_OUT(query_io_buffer_read);
1128 return (result);
1129}
1130
1131/*
1132 * Uses the qstate fields to process an "alternate" write - when the buffer is
1133 * too large to be sent during one socket write operation
1134 */
1135ssize_t
1136query_io_buffer_write(struct query_state *qstate, const void *buf,
1137 size_t nbytes)
1138{
1139 ssize_t result;
1140
1141 TRACE_IN(query_io_buffer_write);
1142 if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
1143 return (-1);
1144
1145 if (nbytes < qstate->io_buffer + qstate->io_buffer_size -
1146 qstate->io_buffer_p)
1147 result = nbytes;
1148 else
1149 result = qstate->io_buffer + qstate->io_buffer_size -
1150 qstate->io_buffer_p;
1151
1152 memcpy(qstate->io_buffer_p, buf, result);
1153 qstate->io_buffer_p += result;
1154
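	/*
	 * The buffer has been filled completely: flag alternate I/O, rewind
	 * the buffer pointer and restore the direct socket I/O functions.
	 */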
1155 if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) {
1156 qstate->use_alternate_io = 1;
1157 qstate->io_buffer_p = qstate->io_buffer;
1158
1159 qstate->write_func = query_socket_write;
1160 qstate->read_func = query_socket_read;
1161 }
1162
1163 TRACE_OUT(query_io_buffer_write);
1164 return (result);
1165}
1166
1167/*
 1168 * The default "read" function, which reads data directly from the socket
1169 */
1170ssize_t
1171query_socket_read(struct query_state *qstate, void *buf, size_t nbytes)
1172{
1173 ssize_t result;
1174
1175 TRACE_IN(query_socket_read);
1176 if (qstate->socket_failed != 0) {
1177 TRACE_OUT(query_socket_read);
1178 return (-1);
1179 }
1180
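	/*
	 * Any error or short read marks the connection as failed, so further
	 * reads and writes on this query will be refused.
	 */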
1181 result = read(qstate->sockfd, buf, nbytes);
1182 if ((result == -1) || (result < nbytes))
1183 qstate->socket_failed = 1;
1184
1185 TRACE_OUT(query_socket_read);
1186 return (result);
1187}
1188
1189/*
 1190 * The default "write" function, which writes data directly to the socket
1191 */
1192ssize_t
1193query_socket_write(struct query_state *qstate, const void *buf, size_t nbytes)
1194{
1195 ssize_t result;
1196
1197 TRACE_IN(query_socket_write);
1198 if (qstate->socket_failed != 0) {
1199 TRACE_OUT(query_socket_write);
1200 return (-1);
1201 }
1202
1203 result = write(qstate->sockfd, buf, nbytes);
1204 if ((result == -1) || (result < nbytes))
1205 qstate->socket_failed = 1;
1206
1207 TRACE_OUT(query_socket_write);
1208 return (result);
1209}
1210
1211/*
1212 * Initializes the query_state structure by filling it with the default values.
1213 */
1214struct query_state *
1215init_query_state(int sockfd, size_t kevent_watermark, uid_t euid, gid_t egid)
1216{
1217 struct query_state *retval;
1218
1219 TRACE_IN(init_query_state);
1220 retval = (struct query_state *)calloc(1, sizeof(struct query_state));
1221 assert(retval != NULL);
1222
1223 retval->sockfd = sockfd;
1224 retval->kevent_filter = EVFILT_READ;
1225 retval->kevent_watermark = kevent_watermark;
1226
1227 retval->euid = euid;
1228 retval->egid = egid;
1229 retval->uid = retval->gid = -1;
1230
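	/*
	 * The "<euid>_<egid>_" string is used as a prefix for cache keys to
	 * keep per-user entries separate.
	 */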
1231 if (asprintf(&retval->eid_str, "%d_%d_", retval->euid,
1232 retval->egid) == -1) {
1233 free(retval);
1234 return (NULL);
1235 }
1236 retval->eid_str_length = strlen(retval->eid_str);
1237
1238 init_comm_element(&retval->request, CET_UNDEFINED);
1239 init_comm_element(&retval->response, CET_UNDEFINED);
1240 retval->process_func = on_query_startup;
1241 retval->destroy_func = on_query_destroy;
1242
1243 retval->write_func = query_socket_write;
1244 retval->read_func = query_socket_read;
1245
1246 get_time_func(&retval->creation_time);
1247 memcpy(&retval->timeout, &s_configuration->query_timeout,
1248 sizeof(struct timeval));
1249
1250 TRACE_OUT(init_query_state);
1251 return (retval);
1252}
1253
1254void
1255destroy_query_state(struct query_state *qstate)
1256{
1257
1258 TRACE_IN(destroy_query_state);
1259 if (qstate->eid_str != NULL)
1260 free(qstate->eid_str);
1261
1262 if (qstate->io_buffer != NULL)
1263 free(qstate->io_buffer);
1264
1265 qstate->destroy_func(qstate);
1266 free(qstate);
1267 TRACE_OUT(destroy_query_state);
1268}