/* cache.c - routines to maintain an in-core cache of entries */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
 *
 * Copyright 2001-2011 The OpenLDAP Foundation.
 * Portions Copyright 2001-2003 Pierangelo Masarati.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted only as authorized by the OpenLDAP
 * Public License.
 *
 * A copy of this license is available in file LICENSE in the
 * top-level directory of the distribution or, alternatively, at
 * <http://www.OpenLDAP.org/license.html>.
 */
/* ACKNOWLEDGEMENTS:
 * This work was initially developed by Pierangelo Masarati for inclusion
 * in OpenLDAP Software.
 */

#include "portable.h"

#include <stdio.h>
#include "ac/string.h"

#include "slap.h"

#include "back-monitor.h"

/*
 * The cache maps DNs to Entries.
 * Each entry, in turn, holds the list of its children in the e_private
 * field.  This is used by the search operation to perform onelevel and
 * subtree candidate selection.
 */
typedef struct monitor_cache_t {
	struct berval		mc_ndn;
	Entry			*mc_e;
} monitor_cache_t;
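
/*
 * Illustrative sketch, not part of the build: how a caller might walk
 * the children list described in the comment above.  "e_parent" and
 * "e_child" are hypothetical locals; mp_children and mp_next are the
 * same fields manipulated by monitor_cache_remove() below.
 *
 *	monitor_entry_t	*mp = ( monitor_entry_t * )e_parent->e_private;
 *	Entry		*e_child;
 *
 *	for ( e_child = mp->mp_children; e_child != NULL;
 *		e_child = (( monitor_entry_t * )e_child->e_private)->mp_next )
 *	{
 *		... onelevel/subtree candidate selection ...
 *	}
 */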

/*
 * compares entries based on the dn
 */
int
monitor_cache_cmp(
	const void	*c1,
	const void	*c2 )
{
	monitor_cache_t 	*cc1 = ( monitor_cache_t * )c1;
	monitor_cache_t 	*cc2 = ( monitor_cache_t * )c2;

	/*
	 * case sensitive, because the dn MUST be normalized
	 */
	return ber_bvcmp( &cc1->mc_ndn, &cc2->mc_ndn );
}

/*
 * checks for duplicate entries
 */
int
monitor_cache_dup(
	void		*c1,
	void		*c2 )
{
	monitor_cache_t *cc1 = ( monitor_cache_t * )c1;
	monitor_cache_t *cc2 = ( monitor_cache_t * )c2;

	/*
	 * case sensitive, because the dn MUST be normalized
	 */
	return ber_bvcmp( &cc1->mc_ndn, &cc2->mc_ndn ) == 0 ? -1 : 0;
}
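
/*
 * A note on the -1 above: avl_insert() invokes this dup callback when
 * the compare function reports a match, and the nonzero return is what
 * makes monitor_cache_add() fail (return nonzero) instead of inserting
 * a second cache slot for the same normalized DN.
 */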

/*
 * adds an entry to the cache
 */
int
monitor_cache_add(
	monitor_info_t	*mi,
	Entry		*e )
{
	monitor_cache_t	*mc;
	int		rc;

	assert( mi != NULL );
	assert( e != NULL );

	mc = ( monitor_cache_t * )ch_malloc( sizeof( monitor_cache_t ) );
	mc->mc_ndn = e->e_nname;
	mc->mc_e = e;
	ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );
	rc = avl_insert( &mi->mi_cache, ( caddr_t )mc,
			monitor_cache_cmp, monitor_cache_dup );
	ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );

	return rc;
}
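
/*
 * Illustrative sketch, not part of the build: the calling pattern
 * monitor_cache_add() assumes.  "mi" and "e" are hypothetical locals,
 * and using monitor_entrypriv_create() for e_private is an assumption
 * about the caller, not something enforced here.
 *
 *	e->e_private = monitor_entrypriv_create();
 *	if ( monitor_cache_add( mi, e ) != 0 ) {
 *		... an entry with the same normalized DN is already cached ...
 *	}
 *
 * Note that mc_ndn aliases e->e_nname rather than duplicating it, so
 * the entry's normalized DN must stay valid for as long as the entry
 * remains in the cache.
 */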

/*
 * locks the entry (plain mutex, no read/write distinction)
 */
int
monitor_cache_lock(
	Entry		*e )
{
	monitor_entry_t *mp;

	assert( e != NULL );
	assert( e->e_private != NULL );

	mp = ( monitor_entry_t * )e->e_private;
	ldap_pvt_thread_mutex_lock( &mp->mp_mutex );

	return( 0 );
}

/*
 * tries to lock the entry (plain mutex, no read/write distinction);
 * returns nonzero if the lock is already held
 */
int
monitor_cache_trylock(
	Entry		*e )
{
	monitor_entry_t *mp;

	assert( e != NULL );
	assert( e->e_private != NULL );

	mp = ( monitor_entry_t * )e->e_private;
	return ldap_pvt_thread_mutex_trylock( &mp->mp_mutex );
}
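
/*
 * Locking discipline used throughout this file: mi_cache_mutex guards
 * the AVL tree, while each entry carries its own mp_mutex.  Lookups
 * take mi_cache_mutex first and only *try* to lock the entry; on
 * contention they drop the cache mutex and retry, so the two locks are
 * never waited on in opposite orders.
 */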

/*
 * gets an entry from the cache based on the normalized dn;
 * the entry is returned with its mutex locked
 */
int
monitor_cache_get(
	monitor_info_t	*mi,
	struct berval	*ndn,
	Entry		**ep )
{
	monitor_cache_t tmp_mc, *mc;

	assert( mi != NULL );
	assert( ndn != NULL );
	assert( ep != NULL );

	*ep = NULL;

	tmp_mc.mc_ndn = *ndn;
retry:;
	ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );
	mc = ( monitor_cache_t * )avl_find( mi->mi_cache,
			( caddr_t )&tmp_mc, monitor_cache_cmp );

	if ( mc != NULL ) {
		/* entry is returned with mutex locked */
		if ( monitor_cache_trylock( mc->mc_e ) ) {
			ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
			ldap_pvt_thread_yield();
			goto retry;
		}
		*ep = mc->mc_e;
	}

	ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );

	return ( *ep == NULL ? -1 : 0 );
}
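
/*
 * Illustrative sketch, not part of the build: the lookup/release
 * pairing monitor_cache_get() expects from callers.  "mi" and "ndn"
 * are hypothetical locals.
 *
 *	Entry	*e = NULL;
 *
 *	if ( monitor_cache_get( mi, ndn, &e ) == 0 ) {
 *		... e is locked here; read or update it ...
 *		monitor_cache_release( mi, e );
 *	}
 */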

/*
 * removes the entry with the given normalized dn from the cache;
 * on success the entry, unlinked from its parent's children list,
 * is returned through ep
 */
int
monitor_cache_remove(
	monitor_info_t	*mi,
	struct berval	*ndn,
	Entry		**ep )
{
	monitor_cache_t tmp_mc, *mc;
	struct berval	pndn;

	assert( mi != NULL );
	assert( ndn != NULL );
	assert( ep != NULL );

	*ep = NULL;

	dnParent( ndn, &pndn );

retry:;
	ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );

	tmp_mc.mc_ndn = *ndn;
	mc = ( monitor_cache_t * )avl_find( mi->mi_cache,
			( caddr_t )&tmp_mc, monitor_cache_cmp );

	if ( mc != NULL ) {
		monitor_cache_t *pmc;

		if ( monitor_cache_trylock( mc->mc_e ) ) {
			ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
			goto retry;
		}

		tmp_mc.mc_ndn = pndn;
		pmc = ( monitor_cache_t * )avl_find( mi->mi_cache,
			( caddr_t )&tmp_mc, monitor_cache_cmp );
		if ( pmc != NULL ) {
			monitor_entry_t	*mp = (monitor_entry_t *)mc->mc_e->e_private,
					*pmp = (monitor_entry_t *)pmc->mc_e->e_private;
			Entry		**entryp;

			if ( monitor_cache_trylock( pmc->mc_e ) ) {
				monitor_cache_release( mi, mc->mc_e );
				ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
				goto retry;
			}

			for ( entryp = &pmp->mp_children; *entryp != NULL; ) {
				monitor_entry_t	*next = (monitor_entry_t *)(*entryp)->e_private;
				if ( next == mp ) {
					*entryp = next->mp_next;
					entryp = NULL;
					break;
				}

				entryp = &next->mp_next;
			}

			if ( entryp != NULL ) {
				Debug( LDAP_DEBUG_ANY,
					"monitor_cache_remove(\"%s\"): "
					"not in parent's list\n",
					ndn->bv_val, 0, 0 );
			}

			/* either succeeded, and the entry is no longer
			 * in its parent's list, or failed, and the
			 * entry is neither mucked with nor returned */
			monitor_cache_release( mi, pmc->mc_e );

			if ( entryp == NULL ) {
				monitor_cache_t *tmpmc;

				tmp_mc.mc_ndn = *ndn;
				tmpmc = avl_delete( &mi->mi_cache,
					( caddr_t )&tmp_mc, monitor_cache_cmp );
				assert( tmpmc == mc );

				*ep = mc->mc_e;
				ch_free( mc );
				mc = NULL;

				/* NOTE: we destroy the mutex, but otherwise
				 * leave the private data around; specifically,
				 * callbacks need be freed by someone else */

				ldap_pvt_thread_mutex_destroy( &mp->mp_mutex );
				mp->mp_next = NULL;
				mp->mp_children = NULL;
			}

		}

		if ( mc ) {
			monitor_cache_release( mi, mc->mc_e );
		}
	}

	ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );

	return ( *ep == NULL ? -1 : 0 );
}
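
/*
 * On success, the entry handed back through ep has been deleted from
 * the AVL tree and unlinked from its parent's children list, and its
 * mp_mutex has already been destroyed; per the NOTE above, the caller
 * owns it exclusively and must free the remaining private data
 * (callbacks included) and the entry itself.
 */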

/*
 * If the entry exists in the cache, it is returned locked; otherwise,
 * if the parent exists and may generate volatile descendants, an
 * attempt is made to generate the required entry, which is returned
 * locked if successful
 */
int
monitor_cache_dn2entry(
	Operation		*op,
	SlapReply		*rs,
	struct berval		*ndn,
	Entry			**ep,
	Entry			**matched )
{
	monitor_info_t *mi = (monitor_info_t *)op->o_bd->be_private;
	int 			rc;
	struct berval		p_ndn = BER_BVNULL;
	Entry 			*e_parent;
	monitor_entry_t 	*mp;

	assert( mi != NULL );
	assert( ndn != NULL );
	assert( ep != NULL );
	assert( matched != NULL );

	*matched = NULL;

	if ( !dnIsSuffix( ndn, &op->o_bd->be_nsuffix[ 0 ] ) ) {
		return( -1 );
	}

	rc = monitor_cache_get( mi, ndn, ep );
	if ( !rc && *ep != NULL ) {
		return( 0 );
	}

	/* try with parent/ancestors */
	if ( BER_BVISNULL( ndn ) ) {
		BER_BVSTR( &p_ndn, "" );

	} else {
		dnParent( ndn, &p_ndn );
	}

	rc = monitor_cache_dn2entry( op, rs, &p_ndn, &e_parent, matched );
	if ( rc || e_parent == NULL ) {
		return( -1 );
	}

	mp = ( monitor_entry_t * )e_parent->e_private;
	rc = -1;
	if ( mp->mp_flags & MONITOR_F_VOLATILE_CH ) {
		/* parent entry generates volatile children */
		rc = monitor_entry_create( op, rs, ndn, e_parent, ep );
	}

	if ( !rc ) {
		monitor_cache_lock( *ep );
		monitor_cache_release( mi, e_parent );

	} else {
		*matched = e_parent;
	}

	return( rc );
}
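
/*
 * The recursion above walks up toward the database suffix until a
 * cached ancestor is found, then volatile entries are generated back
 * down one level at a time.  When generation is not possible, the
 * deepest existing ancestor is handed back locked through *matched,
 * and the caller must eventually release it.  Illustrative sketch,
 * not part of the build ("mi", "op", "rs" and "ndn" are hypothetical
 * locals):
 *
 *	Entry	*e = NULL, *matched = NULL;
 *
 *	if ( monitor_cache_dn2entry( op, rs, ndn, &e, &matched ) == 0 ) {
 *		... use e (locked) ...
 *		monitor_cache_release( mi, e );
 *
 *	} else if ( matched != NULL ) {
 *		... report the matched DN ...
 *		monitor_cache_release( mi, matched );
 *	}
 */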

/*
 * releases the lock of the entry; if it is marked as volatile, it is
 * destroyed.
 */
int
monitor_cache_release(
	monitor_info_t	*mi,
	Entry		*e )
{
	monitor_entry_t *mp;

	assert( mi != NULL );
	assert( e != NULL );
	assert( e->e_private != NULL );

	mp = ( monitor_entry_t * )e->e_private;

	if ( mp->mp_flags & MONITOR_F_VOLATILE ) {
		monitor_cache_t	*mc, tmp_mc;

		/* volatile entries do not return to cache */
		ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );
		tmp_mc.mc_ndn = e->e_nname;
		mc = avl_delete( &mi->mi_cache,
				( caddr_t )&tmp_mc, monitor_cache_cmp );
		ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
		if ( mc != NULL ) {
			ch_free( mc );
		}

		ldap_pvt_thread_mutex_unlock( &mp->mp_mutex );
		ldap_pvt_thread_mutex_destroy( &mp->mp_mutex );
		ch_free( mp );
		e->e_private = NULL;
		entry_free( e );

		return( 0 );
	}

	ldap_pvt_thread_mutex_unlock( &mp->mp_mutex );

	return( 0 );
}

static void
monitor_entry_destroy( void *v_mc )
{
	monitor_cache_t		*mc = (monitor_cache_t *)v_mc;

	if ( mc->mc_e != NULL ) {
		monitor_entry_t *mp;

		assert( mc->mc_e->e_private != NULL );

		mp = ( monitor_entry_t * )mc->mc_e->e_private;

		if ( mp->mp_cb ) {
			monitor_callback_t	*cb;

			for ( cb = mp->mp_cb; cb != NULL; ) {
				monitor_callback_t	*next = cb->mc_next;

				if ( cb->mc_free ) {
					(void)cb->mc_free( mc->mc_e, &cb->mc_private );
				}
				/* free this callback, then move to the next */
				ch_free( cb );

				cb = next;
			}
		}

		ldap_pvt_thread_mutex_destroy( &mp->mp_mutex );

		ch_free( mp );
		mc->mc_e->e_private = NULL;
		entry_free( mc->mc_e );
	}

	ch_free( mc );
}

int
monitor_cache_destroy(
	monitor_info_t	*mi )
{
	if ( mi->mi_cache ) {
		avl_free( mi->mi_cache, monitor_entry_destroy );
	}

	return 0;
}

int
monitor_back_release(
	Operation	*op,
	Entry		*e,
	int		rw )
{
	monitor_info_t	*mi = ( monitor_info_t * )op->o_bd->be_private;

	return monitor_cache_release( mi, e );
}