/*-
 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/usr.sbin/nscd/cachelib.c 315600 2017-03-20 00:55:24Z pfg $");

#include <sys/time.h>

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "cachelib.h"
#include "debug.h"

#define INITIAL_ENTRIES_CAPACITY 32
#define ENTRIES_CAPACITY_STEP 32

#define STRING_SIMPLE_HASH_BODY(in_var, var, a, M)		\
	for ((var) = 0; *(in_var) != '\0'; ++(in_var))		\
		(var) = ((a)*(var) + *(in_var)) % (M)

#define STRING_SIMPLE_MP2_HASH_BODY(in_var, var, a, M)		\
	for ((var) = 0; *(in_var) != 0; ++(in_var))		\
		(var) = ((a)*(var) + *(in_var)) & (M - 1)
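
/*
 * Note: the two macros above implement a simple multiplicative string hash,
 * var = a * var + *in_var (mod M).  The MP2 variant assumes that M is a
 * power of two, so the reduction can be done with a bitwise AND.  They are
 * not used in this file itself and appear to be provided for other parts of
 * nscd.
 */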

static int cache_elemsize_common_continue_func(struct cache_common_entry_ *,
	struct cache_policy_item_ *);
static int cache_lifetime_common_continue_func(struct cache_common_entry_ *,
	struct cache_policy_item_ *);
static void clear_cache_entry(struct cache_entry_ *);
static void destroy_cache_entry(struct cache_entry_ *);
static void destroy_cache_mp_read_session(struct cache_mp_read_session_ *);
static void destroy_cache_mp_write_session(struct cache_mp_write_session_ *);
static int entries_bsearch_cmp_func(const void *, const void *);
static int entries_qsort_cmp_func(const void *, const void *);
static struct cache_entry_ ** find_cache_entry_p(struct cache_ *,
	const char *);
static void flush_cache_entry(struct cache_entry_ *);
static void flush_cache_policy(struct cache_common_entry_ *,
	struct cache_policy_ *, struct cache_policy_ *,
		int (*)(struct cache_common_entry_ *,
		struct cache_policy_item_ *));
static int ht_items_cmp_func(const void *, const void *);
static int ht_items_fixed_size_left_cmp_func(const void *, const void *);
static hashtable_index_t ht_item_hash_func(const void *, size_t);

/*
 * Hashing and comparing routines that are used with the hash tables
 */
static int
ht_items_cmp_func(const void *p1, const void *p2)
{
	struct cache_ht_item_data_ *hp1, *hp2;
	size_t min_size;
	int result;

	hp1 = (struct cache_ht_item_data_ *)p1;
	hp2 = (struct cache_ht_item_data_ *)p2;

	assert(hp1->key != NULL);
	assert(hp2->key != NULL);

	if (hp1->key_size != hp2->key_size) {
		min_size = (hp1->key_size < hp2->key_size) ? hp1->key_size :
			hp2->key_size;
		result = memcmp(hp1->key, hp2->key, min_size);

		if (result == 0)
			return ((hp1->key_size < hp2->key_size) ? -1 : 1);
		else
			return (result);
	} else
		return (memcmp(hp1->key, hp2->key, hp1->key_size));
}

static int
ht_items_fixed_size_left_cmp_func(const void *p1, const void *p2)
{
	struct cache_ht_item_data_ *hp1, *hp2;
	size_t min_size;
	int result;

	hp1 = (struct cache_ht_item_data_ *)p1;
	hp2 = (struct cache_ht_item_data_ *)p2;

	assert(hp1->key != NULL);
	assert(hp2->key != NULL);

	if (hp1->key_size != hp2->key_size) {
		min_size = (hp1->key_size < hp2->key_size) ? hp1->key_size :
			hp2->key_size;
		result = memcmp(hp1->key, hp2->key, min_size);

		if (result == 0)
			if (min_size == hp1->key_size)
			    return (0);
			else
			    return ((hp1->key_size < hp2->key_size) ? -1 : 1);
		else
			return (result);
	} else
		return (memcmp(hp1->key, hp2->key, hp1->key_size));
}

static hashtable_index_t
ht_item_hash_func(const void *p, size_t cache_entries_size)
{
	struct cache_ht_item_data_ *hp;
	size_t i;

	hashtable_index_t retval;

	hp = (struct cache_ht_item_data_ *)p;
	assert(hp->key != NULL);

	retval = 0;
	for (i = 0; i < hp->key_size; ++i)
	    retval = (127 * retval + (unsigned char)hp->key[i]) %
		cache_entries_size;

	return (retval);
}

HASHTABLE_PROTOTYPE(cache_ht_, cache_ht_item_, struct cache_ht_item_data_);
HASHTABLE_GENERATE(cache_ht_, cache_ht_item_, struct cache_ht_item_data_, data,
	ht_item_hash_func, ht_items_cmp_func);

/*
 * Routines to sort and search the entries by name
 */
static int
entries_bsearch_cmp_func(const void *key, const void *ent)
{

	assert(key != NULL);
	assert(ent != NULL);

	return (strcmp((char const *)key,
		(*(struct cache_entry_ const **)ent)->name));
}

static int
entries_qsort_cmp_func(const void *e1, const void *e2)
{

	assert(e1 != NULL);
	assert(e2 != NULL);

	return (strcmp((*(struct cache_entry_ const **)e1)->name,
		(*(struct cache_entry_ const **)e2)->name));
}

static struct cache_entry_ **
find_cache_entry_p(struct cache_ *the_cache, const char *entry_name)
{

	return ((struct cache_entry_ **)(bsearch(entry_name, the_cache->entries,
		the_cache->entries_size, sizeof(struct cache_entry_ *),
		entries_bsearch_cmp_func)));
}

static void
destroy_cache_mp_write_session(struct cache_mp_write_session_ *ws)
{
	struct cache_mp_data_item_	*data_item;

	TRACE_IN(destroy_cache_mp_write_session);
	assert(ws != NULL);
	while (!TAILQ_EMPTY(&ws->items)) {
		data_item = TAILQ_FIRST(&ws->items);
		TAILQ_REMOVE(&ws->items, data_item, entries);
		free(data_item->value);
		free(data_item);
	}

	free(ws);
	TRACE_OUT(destroy_cache_mp_write_session);
}

static void
destroy_cache_mp_read_session(struct cache_mp_read_session_ *rs)
{

	TRACE_IN(destroy_cache_mp_read_session);
	assert(rs != NULL);
	free(rs);
	TRACE_OUT(destroy_cache_mp_read_session);
}

static void
destroy_cache_entry(struct cache_entry_ *entry)
{
	struct cache_common_entry_	*common_entry;
	struct cache_mp_entry_		*mp_entry;
	struct cache_mp_read_session_	*rs;
	struct cache_mp_write_session_	*ws;
	struct cache_ht_item_ *ht_item;
	struct cache_ht_item_data_ *ht_item_data;

	TRACE_IN(destroy_cache_entry);
	assert(entry != NULL);

	if (entry->params->entry_type == CET_COMMON) {
		common_entry = (struct cache_common_entry_ *)entry;

		HASHTABLE_FOREACH(&(common_entry->items), ht_item) {
			HASHTABLE_ENTRY_FOREACH(ht_item, data, ht_item_data)
			{
				free(ht_item_data->key);
				free(ht_item_data->value);
			}
			HASHTABLE_ENTRY_CLEAR(ht_item, data);
		}

		HASHTABLE_DESTROY(&(common_entry->items), data);

		/* FIFO policy is always first */
		destroy_cache_fifo_policy(common_entry->policies[0]);
		switch (common_entry->common_params.policy) {
		case CPT_LRU:
			destroy_cache_lru_policy(common_entry->policies[1]);
			break;
		case CPT_LFU:
			destroy_cache_lfu_policy(common_entry->policies[1]);
			break;
		default:
			break;
		}
		free(common_entry->policies);
	} else {
		mp_entry = (struct cache_mp_entry_ *)entry;

		while (!TAILQ_EMPTY(&mp_entry->ws_head)) {
			ws = TAILQ_FIRST(&mp_entry->ws_head);
			TAILQ_REMOVE(&mp_entry->ws_head, ws, entries);
			destroy_cache_mp_write_session(ws);
		}

		while (!TAILQ_EMPTY(&mp_entry->rs_head)) {
			rs = TAILQ_FIRST(&mp_entry->rs_head);
			TAILQ_REMOVE(&mp_entry->rs_head, rs, entries);
			destroy_cache_mp_read_session(rs);
		}

		if (mp_entry->completed_write_session != NULL)
			destroy_cache_mp_write_session(
				mp_entry->completed_write_session);

		if (mp_entry->pending_write_session != NULL)
			destroy_cache_mp_write_session(
				mp_entry->pending_write_session);
	}

	free(entry->name);
	free(entry);
	TRACE_OUT(destroy_cache_entry);
}

static void
clear_cache_entry(struct cache_entry_ *entry)
{
	struct cache_mp_entry_		*mp_entry;
	struct cache_common_entry_	*common_entry;
	struct cache_ht_item_ *ht_item;
	struct cache_ht_item_data_ *ht_item_data;
	struct cache_policy_ *policy;
	struct cache_policy_item_ *item, *next_item;
	size_t entry_size;
	unsigned int i;

	if (entry->params->entry_type == CET_COMMON) {
		common_entry = (struct cache_common_entry_ *)entry;

		entry_size = 0;
		HASHTABLE_FOREACH(&(common_entry->items), ht_item) {
			HASHTABLE_ENTRY_FOREACH(ht_item, data, ht_item_data)
			{
				free(ht_item_data->key);
				free(ht_item_data->value);
			}
			entry_size += HASHTABLE_ENTRY_SIZE(ht_item, data);
			HASHTABLE_ENTRY_CLEAR(ht_item, data);
		}

		common_entry->items_size -= entry_size;
		for (i = 0; i < common_entry->policies_size; ++i) {
			policy = common_entry->policies[i];

			next_item = NULL;
			item = policy->get_first_item_func(policy);
			while (item != NULL) {
				next_item = policy->get_next_item_func(policy,
				    item);
				policy->remove_item_func(policy, item);
				policy->destroy_item_func(item);
				item = next_item;
			}
		}
	} else {
		mp_entry = (struct cache_mp_entry_ *)entry;

		if (mp_entry->rs_size == 0) {
			if (mp_entry->completed_write_session != NULL) {
				destroy_cache_mp_write_session(
					mp_entry->completed_write_session);
				mp_entry->completed_write_session = NULL;
			}

			memset(&mp_entry->creation_time, 0,
				sizeof(struct timeval));
			memset(&mp_entry->last_request_time, 0,
				sizeof(struct timeval));
		}
	}
}

/*
 * When passed to flush_cache_policy, ensures that all obsolete elements are
 * deleted.
 */
static int
cache_lifetime_common_continue_func(struct cache_common_entry_ *entry,
	struct cache_policy_item_ *item)
{

	return ((item->last_request_time.tv_sec - item->creation_time.tv_sec >
		entry->common_params.max_lifetime.tv_sec) ? 1 : 0);
}

/*
 * When passed to flush_cache_policy, ensures that elements are deleted while
 * the entry still exceeds its size limit.
 */
static int
cache_elemsize_common_continue_func(struct cache_common_entry_ *entry,
	struct cache_policy_item_ *item)
{

	return ((entry->items_size > entry->common_params.satisf_elemsize) ? 1
	    : 0);
}
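
/*
 * A note on the policy layout (see register_cache_entry below): policies[0]
 * is always the FIFO policy and policies[1], if present, is the LRU or LFU
 * policy chosen for the entry.  An item stored in one policy is linked to
 * its counterpart in the other policy through connected_item, which is why
 * removing an item from 'policy' below also removes the corresponding item
 * from 'connected_policy'.
 */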

/*
 * Removes elements from the cache entry while continue_func returns 1.
 */
static void
flush_cache_policy(struct cache_common_entry_ *entry,
	struct cache_policy_ *policy,
	struct cache_policy_ *connected_policy,
	int (*continue_func)(struct cache_common_entry_ *,
		struct cache_policy_item_ *))
{
	struct cache_policy_item_ *item, *next_item, *connected_item;
	struct cache_ht_item_ *ht_item;
	struct cache_ht_item_data_ *ht_item_data, ht_key;
	hashtable_index_t hash;

	assert(policy != NULL);

	next_item = NULL;
	item = policy->get_first_item_func(policy);
	while ((item != NULL) && (continue_func(entry, item) == 1)) {
		next_item = policy->get_next_item_func(policy, item);

		connected_item = item->connected_item;
		policy->remove_item_func(policy, item);

		memset(&ht_key, 0, sizeof(struct cache_ht_item_data_));
		ht_key.key = item->key;
		ht_key.key_size = item->key_size;

		hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &entry->items,
			&ht_key);
		assert(hash < HASHTABLE_ENTRIES_COUNT(&entry->items));

		ht_item = HASHTABLE_GET_ENTRY(&(entry->items), hash);
		ht_item_data = HASHTABLE_ENTRY_FIND(cache_ht_, ht_item,
			&ht_key);
		assert(ht_item_data != NULL);
		free(ht_item_data->key);
		free(ht_item_data->value);
		HASHTABLE_ENTRY_REMOVE(cache_ht_, ht_item, ht_item_data);
		--entry->items_size;

		policy->destroy_item_func(item);

		if (connected_item != NULL) {
			connected_policy->remove_item_func(connected_policy,
				connected_item);
			connected_policy->destroy_item_func(connected_item);
		}

		item = next_item;
	}
}

static void
flush_cache_entry(struct cache_entry_ *entry)
{
	struct cache_mp_entry_		*mp_entry;
	struct cache_common_entry_	*common_entry;
	struct cache_policy_ *policy, *connected_policy;

	connected_policy = NULL;
	if (entry->params->entry_type == CET_COMMON) {
		common_entry = (struct cache_common_entry_ *)entry;
		if ((common_entry->common_params.max_lifetime.tv_sec != 0) ||
		    (common_entry->common_params.max_lifetime.tv_usec != 0)) {

			policy = common_entry->policies[0];
			if (common_entry->policies_size > 1)
				connected_policy = common_entry->policies[1];

			flush_cache_policy(common_entry, policy,
				connected_policy,
				cache_lifetime_common_continue_func);
		}

		if ((common_entry->common_params.max_elemsize != 0) &&
			common_entry->items_size >
			common_entry->common_params.max_elemsize) {

			if (common_entry->policies_size > 1) {
				policy = common_entry->policies[1];
				connected_policy = common_entry->policies[0];
			} else {
				policy = common_entry->policies[0];
				connected_policy = NULL;
			}

			flush_cache_policy(common_entry, policy,
				connected_policy,
				cache_elemsize_common_continue_func);
		}
	} else {
		mp_entry = (struct cache_mp_entry_ *)entry;

		if ((mp_entry->mp_params.max_lifetime.tv_sec != 0)
			|| (mp_entry->mp_params.max_lifetime.tv_usec != 0)) {

			/* clear the entry if the data has expired */
			if (mp_entry->last_request_time.tv_sec -
				mp_entry->creation_time.tv_sec >
				mp_entry->mp_params.max_lifetime.tv_sec)
				clear_cache_entry(entry);
		}
	}
}

struct cache_ *
init_cache(struct cache_params const *params)
{
	struct cache_ *retval;

	TRACE_IN(init_cache);
	assert(params != NULL);

	retval = calloc(1, sizeof(*retval));
	assert(retval != NULL);

	assert(params != NULL);
	memcpy(&retval->params, params, sizeof(struct cache_params));

	retval->entries = calloc(INITIAL_ENTRIES_CAPACITY,
		sizeof(*retval->entries));
	assert(retval->entries != NULL);

	retval->entries_capacity = INITIAL_ENTRIES_CAPACITY;
	retval->entries_size = 0;

	TRACE_OUT(init_cache);
	return (retval);
}

void
destroy_cache(struct cache_ *the_cache)
{

	TRACE_IN(destroy_cache);
	assert(the_cache != NULL);

	if (the_cache->entries != NULL) {
		size_t i;

		for (i = 0; i < the_cache->entries_size; ++i)
			destroy_cache_entry(the_cache->entries[i]);

		free(the_cache->entries);
	}

	free(the_cache);
	TRACE_OUT(destroy_cache);
}

int
register_cache_entry(struct cache_ *the_cache,
	struct cache_entry_params const *params)
{
	int policies_size;
	size_t entry_name_size;
	struct cache_common_entry_	*new_common_entry;
	struct cache_mp_entry_		*new_mp_entry;

	TRACE_IN(register_cache_entry);
	assert(the_cache != NULL);

	if (find_cache_entry(the_cache, params->entry_name) != NULL) {
		TRACE_OUT(register_cache_entry);
		return (-1);
	}

	if (the_cache->entries_size == the_cache->entries_capacity) {
		struct cache_entry_ **new_entries;
		size_t	new_capacity;

		new_capacity = the_cache->entries_capacity +
			ENTRIES_CAPACITY_STEP;
		new_entries = calloc(new_capacity,
			sizeof(*new_entries));
		assert(new_entries != NULL);

		memcpy(new_entries, the_cache->entries,
			sizeof(struct cache_entry_ *)
			* the_cache->entries_size);

		free(the_cache->entries);
		the_cache->entries = new_entries;
		the_cache->entries_capacity = new_capacity;
	}

	entry_name_size = strlen(params->entry_name) + 1;
	switch (params->entry_type) {
	case CET_COMMON:
		new_common_entry = calloc(1,
			sizeof(*new_common_entry));
		assert(new_common_entry != NULL);

		memcpy(&new_common_entry->common_params, params,
			sizeof(struct common_cache_entry_params));
		new_common_entry->params =
		  (struct cache_entry_params *)&new_common_entry->common_params;

		new_common_entry->common_params.cep.entry_name = calloc(1,
			entry_name_size);
		assert(new_common_entry->common_params.cep.entry_name != NULL);
		strlcpy(new_common_entry->common_params.cep.entry_name,
			params->entry_name, entry_name_size);
		new_common_entry->name =
			new_common_entry->common_params.cep.entry_name;

		HASHTABLE_INIT(&(new_common_entry->items),
			struct cache_ht_item_data_, data,
			new_common_entry->common_params.cache_entries_size);

		if (new_common_entry->common_params.policy == CPT_FIFO)
			policies_size = 1;
		else
			policies_size = 2;

		new_common_entry->policies = calloc(policies_size,
			sizeof(*new_common_entry->policies));
		assert(new_common_entry->policies != NULL);

		new_common_entry->policies_size = policies_size;
		new_common_entry->policies[0] = init_cache_fifo_policy();

		if (policies_size > 1) {
			switch (new_common_entry->common_params.policy) {
			case CPT_LRU:
				new_common_entry->policies[1] =
					init_cache_lru_policy();
				break;
			case CPT_LFU:
				new_common_entry->policies[1] =
					init_cache_lfu_policy();
				break;
			default:
				break;
			}
		}

		new_common_entry->get_time_func =
			the_cache->params.get_time_func;
		the_cache->entries[the_cache->entries_size++] =
			(struct cache_entry_ *)new_common_entry;
		break;
	case CET_MULTIPART:
		new_mp_entry = calloc(1,
			sizeof(*new_mp_entry));
		assert(new_mp_entry != NULL);

		memcpy(&new_mp_entry->mp_params, params,
			sizeof(struct mp_cache_entry_params));
		new_mp_entry->params =
			(struct cache_entry_params *)&new_mp_entry->mp_params;

		new_mp_entry->mp_params.cep.entry_name = calloc(1,
			entry_name_size);
		assert(new_mp_entry->mp_params.cep.entry_name != NULL);
		strlcpy(new_mp_entry->mp_params.cep.entry_name, params->entry_name,
			entry_name_size);
		new_mp_entry->name = new_mp_entry->mp_params.cep.entry_name;

		TAILQ_INIT(&new_mp_entry->ws_head);
		TAILQ_INIT(&new_mp_entry->rs_head);

		new_mp_entry->get_time_func = the_cache->params.get_time_func;
		the_cache->entries[the_cache->entries_size++] =
			(struct cache_entry_ *)new_mp_entry;
		break;
	}

	qsort(the_cache->entries, the_cache->entries_size,
		sizeof(struct cache_entry_ *), entries_qsort_cmp_func);

	TRACE_OUT(register_cache_entry);
	return (0);
}

int
unregister_cache_entry(struct cache_ *the_cache, const char *entry_name)
{
	struct cache_entry_ **del_ent;

	TRACE_IN(unregister_cache_entry);
	assert(the_cache != NULL);

	del_ent = find_cache_entry_p(the_cache, entry_name);
	if (del_ent != NULL) {
		destroy_cache_entry(*del_ent);
		--the_cache->entries_size;

		memmove(del_ent, del_ent + 1,
			(&(the_cache->entries[the_cache->entries_size]) -
			del_ent) * sizeof(struct cache_entry_ *));

		TRACE_OUT(unregister_cache_entry);
		return (0);
	} else {
		TRACE_OUT(unregister_cache_entry);
		return (-1);
	}
}

struct cache_entry_ *
find_cache_entry(struct cache_ *the_cache, const char *entry_name)
{
	struct cache_entry_ **result;

	TRACE_IN(find_cache_entry);
	result = find_cache_entry_p(the_cache, entry_name);

	if (result == NULL) {
		TRACE_OUT(find_cache_entry);
		return (NULL);
	} else {
		TRACE_OUT(find_cache_entry);
		return (*result);
	}
}

/*
 * Tries to read the element with the specified key from the cache. If
 * value_size is too small, it will be set to the required size and the
 * caller will need to call cache_read again with a value buffer that is
 * large enough.
 * The function returns 0 on success, -1 on error, and -2 if value_size is
 * too small.
 */
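/*
 * A minimal usage sketch (illustrative only; entry, key, key_size and the
 * buffer handling below are hypothetical):
 *
 *	size_t vsize = 0;
 *	char *value = NULL;
 *	if (cache_read(entry, key, key_size, value, &vsize) == -2) {
 *		value = malloc(vsize);	// vsize now holds the required size
 *		if (cache_read(entry, key, key_size, value, &vsize) == 0)
 *			... use vsize bytes of value ...
 *	}
 */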
int
cache_read(struct cache_entry_ *entry, const char *key, size_t key_size,
	char *value, size_t *value_size)
{
	struct cache_common_entry_	*common_entry;
	struct cache_ht_item_data_	item_data, *find_res;
	struct cache_ht_item_		*item;
	hashtable_index_t	hash;
	struct cache_policy_item_ *connected_item;

	TRACE_IN(cache_read);
	assert(entry != NULL);
	assert(key != NULL);
	assert(value_size != NULL);
	assert(entry->params->entry_type == CET_COMMON);

	common_entry = (struct cache_common_entry_ *)entry;

	memset(&item_data, 0, sizeof(struct cache_ht_item_data_));
	/* can't avoid the cast here */
	item_data.key = (char *)key;
	item_data.key_size = key_size;

	hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &common_entry->items,
		&item_data);
	assert(hash < HASHTABLE_ENTRIES_COUNT(&common_entry->items));

	item = HASHTABLE_GET_ENTRY(&(common_entry->items), hash);
	find_res = HASHTABLE_ENTRY_FIND(cache_ht_, item, &item_data);
	if (find_res == NULL) {
		TRACE_OUT(cache_read);
		return (-1);
	}
	/* pretend that entry was not found if confidence is below threshold */
	if (find_res->confidence <
	    common_entry->common_params.confidence_threshold) {
		TRACE_OUT(cache_read);
		return (-1);
	}

	if ((common_entry->common_params.max_lifetime.tv_sec != 0) ||
		(common_entry->common_params.max_lifetime.tv_usec != 0)) {

		if (find_res->fifo_policy_item->last_request_time.tv_sec -
			find_res->fifo_policy_item->creation_time.tv_sec >
			common_entry->common_params.max_lifetime.tv_sec) {

			/* the item has expired - remove it and report a miss */
			free(find_res->key);
			free(find_res->value);

			connected_item =
			    find_res->fifo_policy_item->connected_item;
			if (connected_item != NULL) {
				common_entry->policies[1]->remove_item_func(
					common_entry->policies[1],
					connected_item);
				common_entry->policies[1]->destroy_item_func(
					connected_item);
			}

			common_entry->policies[0]->remove_item_func(
				common_entry->policies[0],
				find_res->fifo_policy_item);
			common_entry->policies[0]->destroy_item_func(
				find_res->fifo_policy_item);

			HASHTABLE_ENTRY_REMOVE(cache_ht_, item, find_res);
			--common_entry->items_size;

			TRACE_OUT(cache_read);
			return (-1);
		}
	}

	if ((*value_size < find_res->value_size) || (value == NULL)) {
		*value_size = find_res->value_size;
		TRACE_OUT(cache_read);
		return (-2);
	}

	*value_size = find_res->value_size;
	memcpy(value, find_res->value, find_res->value_size);

	++find_res->fifo_policy_item->request_count;
	common_entry->get_time_func(
		&find_res->fifo_policy_item->last_request_time);
	common_entry->policies[0]->update_item_func(common_entry->policies[0],
		find_res->fifo_policy_item);

	if (find_res->fifo_policy_item->connected_item != NULL) {
		connected_item = find_res->fifo_policy_item->connected_item;
		memcpy(&connected_item->last_request_time,
			&find_res->fifo_policy_item->last_request_time,
			sizeof(struct timeval));
		connected_item->request_count =
			find_res->fifo_policy_item->request_count;

		common_entry->policies[1]->update_item_func(
			common_entry->policies[1], connected_item);
	}

	TRACE_OUT(cache_read);
	return (0);
}

/*
 * Writes the value with the specified key into the cache entry.
 * The function returns 0 on success and -1 on error.
 */
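/*
 * Note on duplicates: if the key is already present, the write is only
 * accepted while the stored item's confidence is below
 * common_params.confidence_threshold; an identical value then increases the
 * confidence, a differing value replaces the stored value and resets the
 * confidence to 1.  Once the threshold has been reached, duplicate writes
 * fail with -1.
 */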
int
cache_write(struct cache_entry_ *entry, const char *key, size_t key_size,
	char const *value, size_t value_size)
{
	struct cache_common_entry_	*common_entry;
	struct cache_ht_item_data_	item_data, *find_res;
	struct cache_ht_item_		*item;
	hashtable_index_t	hash;

	struct cache_policy_		*policy, *connected_policy;
	struct cache_policy_item_	*policy_item;
	struct cache_policy_item_	*connected_policy_item;

	TRACE_IN(cache_write);
	assert(entry != NULL);
	assert(key != NULL);
	assert(value != NULL);
	assert(entry->params->entry_type == CET_COMMON);

	common_entry = (struct cache_common_entry_ *)entry;

	memset(&item_data, 0, sizeof(struct cache_ht_item_data_));
	/* can't avoid the cast here */
	item_data.key = (char *)key;
	item_data.key_size = key_size;

	hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &common_entry->items,
		&item_data);
	assert(hash < HASHTABLE_ENTRIES_COUNT(&common_entry->items));

	item = HASHTABLE_GET_ENTRY(&(common_entry->items), hash);
	find_res = HASHTABLE_ENTRY_FIND(cache_ht_, item, &item_data);
	if (find_res != NULL) {
		if (find_res->confidence <
		    common_entry->common_params.confidence_threshold) {
			/* duplicate entry is no error if confidence is low */
			if ((find_res->value_size == value_size) &&
			    (memcmp(find_res->value, value, value_size) == 0)) {
				/* increase confidence on exact match (key and values) */
				find_res->confidence++;
			} else {
				/*
				 * The value has changed: store the new value
				 * and reset the entry to low confidence.
				 */
				free(find_res->value);
				find_res->value = malloc(value_size);
				assert(find_res->value != NULL);
				memcpy(find_res->value, value, value_size);
				find_res->value_size = value_size;
				find_res->confidence = 1;
			}
			TRACE_OUT(cache_write);
			return (0);
		}
		TRACE_OUT(cache_write);
		return (-1);
	}

	item_data.key = malloc(key_size);
	assert(item_data.key != NULL);
	memcpy(item_data.key, key, key_size);

	item_data.value = malloc(value_size);
	assert(item_data.value != NULL);

	memcpy(item_data.value, value, value_size);
	item_data.value_size = value_size;

	item_data.confidence = 1;

	policy_item = common_entry->policies[0]->create_item_func();
	policy_item->key = item_data.key;
	policy_item->key_size = item_data.key_size;
	common_entry->get_time_func(&policy_item->creation_time);

	if (common_entry->policies_size > 1) {
		connected_policy_item =
			common_entry->policies[1]->create_item_func();
		memcpy(&connected_policy_item->creation_time,
			&policy_item->creation_time,
			sizeof(struct timeval));
		connected_policy_item->key = policy_item->key;
		connected_policy_item->key_size = policy_item->key_size;

		connected_policy_item->connected_item = policy_item;
		policy_item->connected_item = connected_policy_item;
	}

	item_data.fifo_policy_item = policy_item;

	common_entry->policies[0]->add_item_func(common_entry->policies[0],
		policy_item);
	if (common_entry->policies_size > 1)
		common_entry->policies[1]->add_item_func(
			common_entry->policies[1], connected_policy_item);

	HASHTABLE_ENTRY_STORE(cache_ht_, item, &item_data);
	++common_entry->items_size;

	if ((common_entry->common_params.max_elemsize != 0) &&
		(common_entry->items_size >
		common_entry->common_params.max_elemsize)) {
		if (common_entry->policies_size > 1) {
			policy = common_entry->policies[1];
			connected_policy = common_entry->policies[0];
		} else {
			policy = common_entry->policies[0];
			connected_policy = NULL;
		}

		flush_cache_policy(common_entry, policy, connected_policy,
			cache_elemsize_common_continue_func);
	}

	TRACE_OUT(cache_write);
	return (0);
}

/*
 * Initializes a write session for the specified multipart entry. The session
 * should then be filled with data and either committed or abandoned using
 * close_cache_mp_write_session or abandon_cache_mp_write_session,
 * respectively.
 * Returns NULL on error (when there are too many open write sessions for
 * the entry).
 */
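/*
 * A typical lifecycle, sketched for reference (entry, data and data_size
 * are hypothetical):
 *
 *	ws = open_cache_mp_write_session(entry);
 *	if (ws != NULL) {
 *		if (cache_mp_write(ws, data, data_size) == 0)
 *			close_cache_mp_write_session(ws);	// commit
 *		else
 *			abandon_cache_mp_write_session(ws);	// discard
 *	}
 */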
struct cache_mp_write_session_ *
open_cache_mp_write_session(struct cache_entry_ *entry)
{
	struct cache_mp_entry_	*mp_entry;
	struct cache_mp_write_session_	*retval;

	TRACE_IN(open_cache_mp_write_session);
	assert(entry != NULL);
	assert(entry->params->entry_type == CET_MULTIPART);
	mp_entry = (struct cache_mp_entry_ *)entry;

	if ((mp_entry->mp_params.max_sessions > 0) &&
		(mp_entry->ws_size == mp_entry->mp_params.max_sessions)) {
		TRACE_OUT(open_cache_mp_write_session);
		return (NULL);
	}

	retval = calloc(1,
		sizeof(*retval));
	assert(retval != NULL);

	TAILQ_INIT(&retval->items);
	retval->parent_entry = mp_entry;

	TAILQ_INSERT_HEAD(&mp_entry->ws_head, retval, entries);
	++mp_entry->ws_size;

	TRACE_OUT(open_cache_mp_write_session);
	return (retval);
}

/*
 * Writes data to the specified session. Returns 0 on success and -1 on error
 * (when the write session size limit is exceeded).
 */
int
cache_mp_write(struct cache_mp_write_session_ *ws, char *data,
	size_t data_size)
{
	struct cache_mp_data_item_	*new_item;

	TRACE_IN(cache_mp_write);
	assert(ws != NULL);
	assert(ws->parent_entry != NULL);
	assert(ws->parent_entry->params->entry_type == CET_MULTIPART);

	if ((ws->parent_entry->mp_params.max_elemsize > 0) &&
		(ws->parent_entry->mp_params.max_elemsize == ws->items_size)) {
		TRACE_OUT(cache_mp_write);
		return (-1);
	}

	new_item = calloc(1,
		sizeof(*new_item));
	assert(new_item != NULL);

	new_item->value = malloc(data_size);
	assert(new_item->value != NULL);
	memcpy(new_item->value, data, data_size);
	new_item->value_size = data_size;

	TAILQ_INSERT_TAIL(&ws->items, new_item, entries);
	++ws->items_size;

	TRACE_OUT(cache_mp_write);
	return (0);
}

/*
 * Abandons the write session and frees all the connected resources.
 */
void
abandon_cache_mp_write_session(struct cache_mp_write_session_ *ws)
{

	TRACE_IN(abandon_cache_mp_write_session);
	assert(ws != NULL);
	assert(ws->parent_entry != NULL);
	assert(ws->parent_entry->params->entry_type == CET_MULTIPART);

	TAILQ_REMOVE(&ws->parent_entry->ws_head, ws, entries);
	--ws->parent_entry->ws_size;

	destroy_cache_mp_write_session(ws);
	TRACE_OUT(abandon_cache_mp_write_session);
}

/*
 * Commits the session to the entry for which it was created.
 */
void
close_cache_mp_write_session(struct cache_mp_write_session_ *ws)
{

	TRACE_IN(close_cache_mp_write_session);
	assert(ws != NULL);
	assert(ws->parent_entry != NULL);
	assert(ws->parent_entry->params->entry_type == CET_MULTIPART);

	TAILQ_REMOVE(&ws->parent_entry->ws_head, ws, entries);
	--ws->parent_entry->ws_size;

	if (ws->parent_entry->completed_write_session == NULL) {
		/*
		 * If there is no completed session yet, this one becomes it.
		 */
		ws->parent_entry->get_time_func(
			&ws->parent_entry->creation_time);
		ws->parent_entry->completed_write_session = ws;
	} else {
		/*
		 * If there is a completed session, save this session as the
		 * pending session. If there already is a pending session, it
		 * is destroyed.
		 */
		if (ws->parent_entry->pending_write_session != NULL)
			destroy_cache_mp_write_session(
				ws->parent_entry->pending_write_session);

		ws->parent_entry->pending_write_session = ws;
	}
	TRACE_OUT(close_cache_mp_write_session);
}

/*
 * Opens a read session for the specified entry. Returns NULL on error (when
 * there is no data in the entry, or the data is obsolete).
 */
struct cache_mp_read_session_ *
open_cache_mp_read_session(struct cache_entry_ *entry)
{
	struct cache_mp_entry_			*mp_entry;
	struct cache_mp_read_session_	*retval;

	TRACE_IN(open_cache_mp_read_session);
	assert(entry != NULL);
	assert(entry->params->entry_type == CET_MULTIPART);
	mp_entry = (struct cache_mp_entry_ *)entry;

	if (mp_entry->completed_write_session == NULL) {
		TRACE_OUT(open_cache_mp_read_session);
		return (NULL);
	}

	if ((mp_entry->mp_params.max_lifetime.tv_sec != 0)
		|| (mp_entry->mp_params.max_lifetime.tv_usec != 0)) {
		/* flush the entry and fail if the data has expired */
		if (mp_entry->last_request_time.tv_sec -
			mp_entry->creation_time.tv_sec >
			mp_entry->mp_params.max_lifetime.tv_sec) {
			flush_cache_entry(entry);
			TRACE_OUT(open_cache_mp_read_session);
			return (NULL);
		}
	}

	retval = calloc(1,
		sizeof(*retval));
	assert(retval != NULL);

	retval->parent_entry = mp_entry;
	retval->current_item = TAILQ_FIRST(
		&mp_entry->completed_write_session->items);

	TAILQ_INSERT_HEAD(&mp_entry->rs_head, retval, entries);
	++mp_entry->rs_size;

	mp_entry->get_time_func(&mp_entry->last_request_time);
	TRACE_OUT(open_cache_mp_read_session);
	return (retval);
}

/*
 * Reads the data from the read session, step by step.
 * Returns 0 on success, -1 on error (when there is no more data), and -2 if
 * data_size is too small.  In the latter case, data_size will be filled with
 * the proper value.
 */
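/*
 * Typical use (a sketch; rs, buf and size are hypothetical):
 *
 *	while ((res = cache_mp_read(rs, buf, &size)) != -1) {
 *		if (res == -2) {
 *			buf = realloc(buf, size);  // size holds the required length
 *			continue;
 *		}
 *		... process size bytes from buf ...
 *	}
 */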
int
cache_mp_read(struct cache_mp_read_session_ *rs, char *data, size_t *data_size)
{

	TRACE_IN(cache_mp_read);
	assert(rs != NULL);

	if (rs->current_item == NULL) {
		TRACE_OUT(cache_mp_read);
		return (-1);
	}

	if (rs->current_item->value_size > *data_size) {
		*data_size = rs->current_item->value_size;
		if (data == NULL) {
			TRACE_OUT(cache_mp_read);
			return (0);
		}

		TRACE_OUT(cache_mp_read);
		return (-2);
	}

	*data_size = rs->current_item->value_size;
	memcpy(data, rs->current_item->value, rs->current_item->value_size);
	rs->current_item = TAILQ_NEXT(rs->current_item, entries);

	TRACE_OUT(cache_mp_read);
	return (0);
}

/*
 * Closes the read session. If there are no more read sessions and there is
 * a pending write session, it is committed and the old
 * completed_write_session is destroyed.
 */
void
close_cache_mp_read_session(struct cache_mp_read_session_ *rs)
{

	TRACE_IN(close_cache_mp_read_session);
	assert(rs != NULL);
	assert(rs->parent_entry != NULL);

	TAILQ_REMOVE(&rs->parent_entry->rs_head, rs, entries);
	--rs->parent_entry->rs_size;

	if ((rs->parent_entry->rs_size == 0) &&
		(rs->parent_entry->pending_write_session != NULL)) {
		destroy_cache_mp_write_session(
			rs->parent_entry->completed_write_session);
		rs->parent_entry->completed_write_session =
			rs->parent_entry->pending_write_session;
		rs->parent_entry->pending_write_session = NULL;
	}

	destroy_cache_mp_read_session(rs);
	TRACE_OUT(close_cache_mp_read_session);
}

int
transform_cache_entry(struct cache_entry_ *entry,
	enum cache_transformation_t transformation)
{

	TRACE_IN(transform_cache_entry);
	switch (transformation) {
	case CTT_CLEAR:
		clear_cache_entry(entry);
		TRACE_OUT(transform_cache_entry);
		return (0);
	case CTT_FLUSH:
		flush_cache_entry(entry);
		TRACE_OUT(transform_cache_entry);
		return (0);
	default:
		TRACE_OUT(transform_cache_entry);
		return (-1);
	}
}

int
transform_cache_entry_part(struct cache_entry_ *entry,
	enum cache_transformation_t transformation, const char *key_part,
	size_t key_part_size, enum part_position_t part_position)
{
	struct cache_common_entry_ *common_entry;
	struct cache_ht_item_ *ht_item;
	struct cache_ht_item_data_ *ht_item_data, ht_key;

	struct cache_policy_item_ *item, *connected_item;

	TRACE_IN(transform_cache_entry_part);
	if (entry->params->entry_type != CET_COMMON) {
		TRACE_OUT(transform_cache_entry_part);
		return (-1);
	}

	if (transformation != CTT_CLEAR) {
		TRACE_OUT(transform_cache_entry_part);
		return (-1);
	}

	memset(&ht_key, 0, sizeof(struct cache_ht_item_data_));
	ht_key.key = (char *)key_part;	/* can't avoid casting here */
	ht_key.key_size = key_part_size;

	common_entry = (struct cache_common_entry_ *)entry;
	HASHTABLE_FOREACH(&(common_entry->items), ht_item) {
		do {
			ht_item_data = HASHTABLE_ENTRY_FIND_SPECIAL(cache_ht_,
				ht_item, &ht_key,
				ht_items_fixed_size_left_cmp_func);

			if (ht_item_data != NULL) {
			    item = ht_item_data->fifo_policy_item;
			    connected_item = item->connected_item;

			    common_entry->policies[0]->remove_item_func(
				common_entry->policies[0],
				item);

			    free(ht_item_data->key);
			    free(ht_item_data->value);
			    HASHTABLE_ENTRY_REMOVE(cache_ht_, ht_item,
				ht_item_data);
			    --common_entry->items_size;

			    common_entry->policies[0]->destroy_item_func(
				item);
			    if (common_entry->policies_size == 2) {
				common_entry->policies[1]->remove_item_func(
				    common_entry->policies[1],
				    connected_item);
				common_entry->policies[1]->destroy_item_func(
				    connected_item);
			    }
			}
		} while (ht_item_data != NULL);
	}

	TRACE_OUT(transform_cache_entry_part);
	return (0);
}