/*
 * object_pool.c :  generic pool of reference-counted objects
 *
 * ====================================================================
 *    Licensed to the Apache Software Foundation (ASF) under one
 *    or more contributor license agreements.  See the NOTICE file
 *    distributed with this work for additional information
 *    regarding copyright ownership.  The ASF licenses this file
 *    to you under the Apache License, Version 2.0 (the
 *    "License"); you may not use this file except in compliance
 *    with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing,
 *    software distributed under the License is distributed on an
 *    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 *    KIND, either express or implied.  See the License for the
 *    specific language governing permissions and limitations
 *    under the License.
 * ====================================================================
 */




#include <assert.h>

#include "svn_error.h"
#include "svn_hash.h"
#include "svn_pools.h"

#include "private/svn_atomic.h"
#include "private/svn_object_pool.h"
#include "private/svn_subr_private.h"
#include "private/svn_dep_compat.h"



/* A reference counting wrapper around the user-provided object.
 */
typedef struct object_ref_t
{
  /* reference to the parent container */
  svn_object_pool__t *object_pool;

  /* identifies the bucket in OBJECT_POOL->OBJECTS in which this entry
   * belongs. */
  svn_membuf_t key;

  /* User provided object. Usually a wrapper. */
  void *wrapper;

  /* private pool. This instance and its other members are allocated in it.
   * Will be destroyed when this instance is cleaned up. */
  apr_pool_t *pool;

  /* Number of references to this data struct */
  volatile svn_atomic_t ref_count;
} object_ref_t;


/* Core data structure.  All access to it must be serialized using MUTEX.
 */
struct svn_object_pool__t
{
  /* serialization object for all non-atomic data in this struct */
  svn_mutex__t *mutex;

  /* object_ref_t.KEY -> object_ref_t* mapping.
   *
   * There is at most one entry per key.  It may or may not currently be
   * in use, i.e. its reference count may be 0. */
  apr_hash_t *objects;

  /* same as apr_hash_count(OBJECTS) but allows for non-serialized access */
  volatile svn_atomic_t object_count;

  /* Number of entries in OBJECTS with a reference count 0.
     Due to races, this may be *temporarily* off by one or more.
     Hence we must not strictly depend on it. */
  volatile svn_atomic_t unused_count;

  /* the root pool owning this structure */
  apr_pool_t *pool;

  /* extractor and updater for the user object wrappers */
  svn_object_pool__getter_t getter;
  svn_object_pool__setter_t setter;
};


/* Pool cleanup function for the whole object pool.
 */
static apr_status_t
object_pool_cleanup(void *baton)
{
  svn_object_pool__t *object_pool = baton;

  /* all entries must have been released by now */
  SVN_ERR_ASSERT_NO_RETURN(   object_pool->object_count
                           == object_pool->unused_count);

  return APR_SUCCESS;
}

/* Remove entries from OBJECTS in OBJECT_POOL that have a ref-count of 0.
 *
 * Requires external serialization on OBJECT_POOL.
 */
static void
remove_unused_objects(svn_object_pool__t *object_pool)
{
  apr_pool_t *subpool = svn_pool_create(object_pool->pool);

  /* process all hash buckets */
  apr_hash_index_t *hi;
  for (hi = apr_hash_first(subpool, object_pool->objects);
       hi != NULL;
       hi = apr_hash_next(hi))
    {
      object_ref_t *object_ref = apr_hash_this_val(hi);

      /* note that we won't hand out new references while access
         to the hash is serialized */
      if (svn_atomic_read(&object_ref->ref_count) == 0)
        {
          apr_hash_set(object_pool->objects, object_ref->key.data,
                       object_ref->key.size, NULL);
          svn_atomic_dec(&object_pool->object_count);
          svn_atomic_dec(&object_pool->unused_count);

          svn_pool_destroy(object_ref->pool);
        }
    }

  svn_pool_destroy(subpool);
}

/* Cleanup function called when an object_ref_t gets released.
 */
static apr_status_t
object_ref_cleanup(void *baton)
{
  object_ref_t *object = baton;
  svn_object_pool__t *object_pool = object->object_pool;

  /* If we released the last reference to the object, there is one more
     unused entry.

     Note that unused_count does not always need to be exact; it only
     needs to become exact *eventually* (we use it to check whether we
     should remove unused objects every now and then).  I.e. it must
     never drift off or get stuck but must always reflect the true value
     once all threads have left the racy sections.
   */
  if (svn_atomic_dec(&object->ref_count) == 0)
    svn_atomic_inc(&object_pool->unused_count);

  return APR_SUCCESS;
}

/* Handle reference counting for the OBJECT_REF that the caller is about
 * to return.  The reference will be released when POOL gets cleaned up.
 *
 * Requires external serialization on OBJECT_REF->OBJECT_POOL.
 */
static void
add_object_ref(object_ref_t *object_ref,
               apr_pool_t *pool)
{
  /* Update ref counter.
     Note that this is racy with object_ref_cleanup; see comment there. */
  if (svn_atomic_inc(&object_ref->ref_count) == 0)
    svn_atomic_dec(&object_ref->object_pool->unused_count);

  /* make sure the reference gets released automatically */
  apr_pool_cleanup_register(pool, object_ref, object_ref_cleanup,
                            apr_pool_cleanup_null);
}

/* Actual implementation of svn_object_pool__lookup.
 *
 * Requires external serialization on OBJECT_POOL.
 */
static svn_error_t *
lookup(void **object,
       svn_object_pool__t *object_pool,
       svn_membuf_t *key,
       void *baton,
       apr_pool_t *result_pool)
{
  object_ref_t *object_ref
    = apr_hash_get(object_pool->objects, key->data, key->size);

  if (object_ref)
    {
      *object = object_pool->getter(object_ref->wrapper, baton, result_pool);
      add_object_ref(object_ref, result_pool);
    }
  else
    {
      *object = NULL;
    }

  return SVN_NO_ERROR;
}

/* Actual implementation of svn_object_pool__insert.
 *
 * Requires external serialization on OBJECT_POOL.
 */
static svn_error_t *
insert(void **object,
       svn_object_pool__t *object_pool,
       const svn_membuf_t *key,
       void *wrapper,
       void *baton,
       apr_pool_t *wrapper_pool,
       apr_pool_t *result_pool)
{
  object_ref_t *object_ref
    = apr_hash_get(object_pool->objects, key->data, key->size);
  if (object_ref)
    {
      /* entry already exists (e.g. race condition) */
      svn_error_t *err = object_pool->setter(&object_ref->wrapper,
                                             wrapper, baton,
                                             object_ref->pool);
      if (err)
        {
          /* if we had an issue in the setter, then OBJECT_REF is in an
           * unknown state now.  Keep it around for the current users
           * (i.e. don't clean the pool) but remove it from the list of
           * available ones.
           */
          apr_hash_set(object_pool->objects, key->data, key->size, NULL);
          svn_atomic_dec(&object_pool->object_count);

          /* for the unlikely case that the object got created _and_
           * already released since we last checked: */
          if (svn_atomic_read(&object_ref->ref_count) == 0)
            svn_atomic_dec(&object_pool->unused_count);

          /* clean up the new data as well because it's not safe to use
           * either.
           */
          svn_pool_destroy(wrapper_pool);

          /* propagate error */
          return svn_error_trace(err);
        }

      /* Destroy the new one and return a reference to the existing one
       * because the existing one may already have references on it.
       */
      svn_pool_destroy(wrapper_pool);
    }
  else
    {
      /* add new index entry */
      object_ref = apr_pcalloc(wrapper_pool, sizeof(*object_ref));
      object_ref->object_pool = object_pool;
      object_ref->wrapper = wrapper;
      object_ref->pool = wrapper_pool;

      svn_membuf__create(&object_ref->key, key->size, wrapper_pool);
      object_ref->key.size = key->size;
      memcpy(object_ref->key.data, key->data, key->size);

      apr_hash_set(object_pool->objects, object_ref->key.data,
                   object_ref->key.size, object_ref);
      svn_atomic_inc(&object_pool->object_count);

      /* the new entry is *not* in use yet.
       * add_object_ref will update counters again.
       */
      svn_atomic_inc(&object_ref->object_pool->unused_count);
    }

  /* return a reference to the object now stored under KEY */
  *object = object_pool->getter(object_ref->wrapper, baton, result_pool);
  add_object_ref(object_ref, result_pool);

  /* Limit memory usage:  if more than roughly half of all entries are
   * unused, evict them now (e.g. 7 unused out of 10 entries triggers
   * this because 7 * 2 = 14 > 10 + 2 = 12). */
  if (svn_atomic_read(&object_pool->unused_count) * 2
      > apr_hash_count(object_pool->objects) + 2)
    remove_unused_objects(object_pool);

  return SVN_NO_ERROR;
}

/* Implement svn_object_pool__getter_t as no-op.
 */
static void *
default_getter(void *object,
               void *baton,
               apr_pool_t *pool)
{
  return object;
}

/* Implement svn_object_pool__setter_t as no-op.
 */
static svn_error_t *
default_setter(void **target,
               void *source,
               void *baton,
               apr_pool_t *pool)
{
  return SVN_NO_ERROR;
}
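
/* The defaults above hand out the stored wrapper as-is and leave an already
 * existing wrapper untouched on re-insertion.  Callers that need to derive a
 * RESULT_POOL-specific view from the wrapper, or to merge newly inserted
 * data into an existing wrapper, can pass their own getter / setter pair to
 * svn_object_pool__create(). */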


/* API implementation */

svn_error_t *
svn_object_pool__create(svn_object_pool__t **object_pool,
                        svn_object_pool__getter_t getter,
                        svn_object_pool__setter_t setter,
                        svn_boolean_t thread_safe,
                        apr_pool_t *pool)
{
  svn_object_pool__t *result;

  /* Construct the object pool in the caller-provided POOL.  Its whole
   * lifetime, and that of all wrapper pools derived from it, is tied to
   * that pool. */
  result = apr_pcalloc(pool, sizeof(*result));
  SVN_ERR(svn_mutex__init(&result->mutex, thread_safe, pool));

  result->pool = pool;
  result->objects = svn_hash__make(result->pool);
  result->getter = getter ? getter : default_getter;
  result->setter = setter ? setter : default_setter;

  /* Make sure we clean up nicely:  when POOL gets cleaned up, verify that
   * all references have been released, i.e. that every entry still in
   * OBJECTS is unused.
   */
  apr_pool_cleanup_register(pool, result, object_pool_cleanup,
                            apr_pool_cleanup_null);

  *object_pool = result;
  return SVN_NO_ERROR;
}

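/* Return a sub-pool of OBJECT_POOL's root pool.  The caller allocates the
 * wrapper in it and passes both to svn_object_pool__insert(), which takes
 * ownership:  the pool is either kept for the lifetime of the new entry or
 * destroyed right away if an entry for the same key already exists. */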
apr_pool_t *
svn_object_pool__new_wrapper_pool(svn_object_pool__t *object_pool)
{
  return svn_pool_create(object_pool->pool);
}

svn_mutex__t *
svn_object_pool__mutex(svn_object_pool__t *object_pool)
{
  return object_pool->mutex;
}

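/* Return the number of objects currently stored in OBJECT_POOL.  This reads
 * the atomic counter without taking the mutex, so the result may be slightly
 * stale while other threads insert or evict entries. */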
unsigned
svn_object_pool__count(svn_object_pool__t *object_pool)
{
  return svn_atomic_read(&object_pool->object_count);
}

svn_error_t *
svn_object_pool__lookup(void **object,
                        svn_object_pool__t *object_pool,
                        svn_membuf_t *key,
                        void *baton,
                        apr_pool_t *result_pool)
{
  *object = NULL;
  SVN_MUTEX__WITH_LOCK(object_pool->mutex,
                       lookup(object, object_pool, key, baton, result_pool));
  return SVN_NO_ERROR;
}

svn_error_t *
svn_object_pool__insert(void **object,
                        svn_object_pool__t *object_pool,
                        const svn_membuf_t *key,
                        void *wrapper,
                        void *baton,
                        apr_pool_t *wrapper_pool,
                        apr_pool_t *result_pool)
{
  *object = NULL;
  SVN_MUTEX__WITH_LOCK(object_pool->mutex,
                       insert(object, object_pool, key, wrapper, baton,
                              wrapper_pool, result_pool));
  return SVN_NO_ERROR;
}
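
/* Typical usage sketch (not lifted from an actual caller; the key setup and
 * create_my_object() are hypothetical placeholders for whatever the caller
 * stores in the pool):
 *
 *   void *obj;
 *   svn_membuf_t key;
 *
 *   svn_membuf__create(&key, strlen("some-key"), scratch_pool);
 *   key.size = strlen("some-key");
 *   memcpy(key.data, "some-key", key.size);
 *
 *   SVN_ERR(svn_object_pool__lookup(&obj, object_pool, &key, NULL,
 *                                   result_pool));
 *   if (obj == NULL)
 *     {
 *       apr_pool_t *wrapper_pool
 *         = svn_object_pool__new_wrapper_pool(object_pool);
 *       void *wrapper = create_my_object(wrapper_pool);
 *
 *       SVN_ERR(svn_object_pool__insert(&obj, object_pool, &key, wrapper,
 *                                       NULL, wrapper_pool, result_pool));
 *     }
 *
 * Cleaning up RESULT_POOL releases the reference; entries without remaining
 * references get evicted lazily by remove_unused_objects(). */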