/*	$NetBSD: heimbase.c,v 1.2 2017/01/28 21:31:45 christos Exp $	*/

/*
 * Copyright (c) 2010 Kungliga Tekniska Högskolan
 * (Royal Institute of Technology, Stockholm, Sweden).
 * All rights reserved.
 *
 * Portions Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "baselocl.h"
#include <syslog.h>

static heim_base_atomic_type tidglobal = HEIM_TID_USER;

struct heim_base {
    heim_type_t isa;
    heim_base_atomic_type ref_cnt;
    HEIM_TAILQ_ENTRY(heim_base) autorel;
    heim_auto_release_t autorelpool;
    uintptr_t isaextra[3];
};

/* specialized version of base */
struct heim_base_mem {
    heim_type_t isa;
    heim_base_atomic_type ref_cnt;
    HEIM_TAILQ_ENTRY(heim_base) autorel;
    heim_auto_release_t autorelpool;
    const char *name;
    void (*dealloc)(void *);
    uintptr_t isaextra[1];
};

#define PTR2BASE(ptr) (((struct heim_base *)ptr) - 1)
#define BASE2PTR(ptr) ((void *)(((struct heim_base *)ptr) + 1))

#ifdef HEIM_BASE_NEED_ATOMIC_MUTEX
HEIMDAL_MUTEX _heim_base_mutex = HEIMDAL_MUTEX_INITIALIZER;
#endif

/*
 * Auto release structure
 */

struct heim_auto_release {
    HEIM_TAILQ_HEAD(, heim_base) pool;
    HEIMDAL_MUTEX pool_mutex;
    struct heim_auto_release *parent;
};


/**
 * Retain object (i.e., take a reference)
 *
 * @param object to be retained, NULL is ok
 *
 * @return the same object as passed in
 */

void *
heim_retain(void *ptr)
{
    struct heim_base *p = PTR2BASE(ptr);

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return ptr;

    if (p->ref_cnt == heim_base_atomic_max)
        return ptr;

    if ((heim_base_atomic_inc(&p->ref_cnt) - 1) == 0)
        heim_abort("resurrection");
    return ptr;
}

/**
 * Release object, free if reference count reaches zero
 *
 * @param object to be released
 */

void
heim_release(void *ptr)
{
    heim_base_atomic_type old;
    struct heim_base *p = PTR2BASE(ptr);

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return;

    if (p->ref_cnt == heim_base_atomic_max)
        return;

    old = heim_base_atomic_dec(&p->ref_cnt) + 1;

    if (old > 1)
        return;

    if (old == 1) {
        heim_auto_release_t ar = p->autorelpool;
        /* remove from autorel pool list */
        if (ar) {
            p->autorelpool = NULL;
            HEIMDAL_MUTEX_lock(&ar->pool_mutex);
            HEIM_TAILQ_REMOVE(&ar->pool, p, autorel);
            HEIMDAL_MUTEX_unlock(&ar->pool_mutex);
        }
        if (p->isa->dealloc)
            p->isa->dealloc(ptr);
        free(p);
    } else
        heim_abort("over release");
}
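
/*
 * Reference-counting sketch (illustrative only, not part of the original
 * file): every heim_retain() must be balanced by a heim_release(); the
 * object is deallocated when its reference count drops to zero.
 * heim_string_create() is used here merely as a convenient constructor.
 *
 *	heim_string_t s = heim_string_create("example");	ref_cnt == 1
 *	heim_retain(s);						ref_cnt == 2
 *	heim_release(s);					ref_cnt == 1
 *	heim_release(s);					object is freed
 */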

/**
 * Get a string description of the object.  The returned string is placed
 * on the thread's autorelease pool, so calls must be wrapped in one.
 */

heim_string_t
heim_description(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);
    if (p->isa->desc == NULL)
        return heim_auto_release(heim_string_ref_create(p->isa->name, NULL));
    return heim_auto_release(p->isa->desc(ptr));
}


void
_heim_make_permanent(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);
    p->ref_cnt = heim_base_atomic_max;
}


static heim_type_t tagged_isa[9] = {
    &_heim_number_object,
    &_heim_null_object,
    &_heim_bool_object,

    NULL,
    NULL,
    NULL,

    NULL,
    NULL,
    NULL
};

heim_type_t
_heim_get_isa(heim_object_t ptr)
{
    struct heim_base *p;
    if (heim_base_is_tagged(ptr)) {
        if (heim_base_is_tagged_object(ptr))
            return tagged_isa[heim_base_tagged_object_tid(ptr)];
        heim_abort("not a supported tagged type");
    }
    p = PTR2BASE(ptr);
    return p->isa;
}

/**
 * Get type ID of object
 *
 * @param object object to get type id of
 *
 * @return type id of object
 */

heim_tid_t
heim_get_tid(heim_object_t ptr)
{
    heim_type_t isa = _heim_get_isa(ptr);
    return isa->tid;
}

/**
 * Get hash value of object
 *
 * @param object object to get hash value for
 *
 * @return a hash value
 */

unsigned long
heim_get_hash(heim_object_t ptr)
{
    heim_type_t isa = _heim_get_isa(ptr);
    if (isa->hash)
        return isa->hash(ptr);
    return (unsigned long)ptr;
}

/**
 * Compare two objects, returns 0 if equal, can be used for qsort()
 * and friends.
 *
 * @param a first object to compare
 * @param b second object to compare
 *
 * @return 0 if objects are equal
 */

int
heim_cmp(heim_object_t a, heim_object_t b)
{
    heim_tid_t ta, tb;
    heim_type_t isa;

    ta = heim_get_tid(a);
    tb = heim_get_tid(b);

    if (ta != tb)
        return ta - tb;

    isa = _heim_get_isa(a);

    if (isa->cmp)
        return isa->cmp(a, b);

    return (uintptr_t)a - (uintptr_t)b;
}
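
/*
 * Usage sketch (illustrative only): heim_cmp() compares the objects
 * themselves, while qsort() hands the comparator pointers to the array
 * elements, so a small wrapper is needed.  The wrapper name cmp_objs is
 * hypothetical.
 *
 *	static int
 *	cmp_objs(const void *a, const void *b)
 *	{
 *	    return heim_cmp(*(heim_object_t *)a, *(heim_object_t *)b);
 *	}
 *
 *	heim_object_t objs[N];
 *	qsort(objs, N, sizeof(objs[0]), cmp_objs);
 */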

/*
 * Private - allocates a memory object
 */

static void
memory_dealloc(void *ptr)
{
    struct heim_base_mem *p = (struct heim_base_mem *)PTR2BASE(ptr);
    if (p->dealloc)
        p->dealloc(ptr);
}

struct heim_type_data memory_object = {
    HEIM_TID_MEMORY,
    "memory-object",
    NULL,
    memory_dealloc,
    NULL,
    NULL,
    NULL,
    NULL
};

/**
 * Allocate memory for an object of anonymous type
 *
 * @param size size of object to be allocated
 * @param name name of ad-hoc type
 * @param dealloc destructor function
 *
 * Objects allocated with this interface do not serialize.
 *
 * @return allocated object
 */

void *
heim_alloc(size_t size, const char *name, heim_type_dealloc dealloc)
{
    /* XXX use posix_memalign */

    struct heim_base_mem *p = calloc(1, size + sizeof(*p));
    if (p == NULL)
        return NULL;
    p->isa = &memory_object;
    p->ref_cnt = 1;
    p->name = name;
    p->dealloc = dealloc;
    return BASE2PTR(p);
}
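
/*
 * Usage sketch (illustrative only): allocating an anonymous
 * reference-counted object with a destructor.  The type struct my_ctx and
 * the function my_ctx_dealloc() are hypothetical.
 *
 *	struct my_ctx { int fd; };
 *
 *	static void
 *	my_ctx_dealloc(void *ptr)
 *	{
 *	    struct my_ctx *c = ptr;
 *	    if (c->fd != -1)
 *	        close(c->fd);
 *	}
 *
 *	struct my_ctx *c = heim_alloc(sizeof(*c), "my-ctx", my_ctx_dealloc);
 *	...
 *	heim_release(c);	destructor runs on the last release
 */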

heim_type_t
_heim_create_type(const char *name,
                  heim_type_init init,
                  heim_type_dealloc dealloc,
                  heim_type_copy copy,
                  heim_type_cmp cmp,
                  heim_type_hash hash,
                  heim_type_description desc)
{
    heim_type_t type;

    type = calloc(1, sizeof(*type));
    if (type == NULL)
        return NULL;

    type->tid = heim_base_atomic_inc(&tidglobal);
    type->name = name;
    type->init = init;
    type->dealloc = dealloc;
    type->copy = copy;
    type->cmp = cmp;
    type->hash = hash;
    type->desc = desc;

    return type;
}

heim_object_t
_heim_alloc_object(heim_type_t type, size_t size)
{
    /* XXX should use posix_memalign */
    struct heim_base *p = calloc(1, size + sizeof(*p));
    if (p == NULL)
        return NULL;
    p->isa = type;
    p->ref_cnt = 1;

    return BASE2PTR(p);
}

void *
_heim_get_isaextra(heim_object_t ptr, size_t idx)
{
    struct heim_base *p = (struct heim_base *)PTR2BASE(ptr);

    heim_assert(ptr != NULL, "internal error");
    if (p->isa == &memory_object)
        return NULL;
    heim_assert(idx < 3, "invalid private heim_base extra data index");
    return &p->isaextra[idx];
}

heim_tid_t
_heim_type_get_tid(heim_type_t type)
{
    return type->tid;
}

#if !defined(WIN32) && !defined(HAVE_DISPATCH_DISPATCH_H) && defined(ENABLE_PTHREAD_SUPPORT)
static pthread_once_t once_arg_key_once = PTHREAD_ONCE_INIT;
static pthread_key_t once_arg_key;

static void
once_arg_key_once_init(void)
{
    errno = pthread_key_create(&once_arg_key, NULL);
    if (errno != 0) {
        fprintf(stderr,
                "Error: pthread_key_create() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
}

struct once_callback {
    void (*fn)(void *);
    void *data;
};

static void
once_callback_caller(void)
{
    struct once_callback *once_callback = pthread_getspecific(once_arg_key);

    if (once_callback == NULL) {
        fprintf(stderr, "Error: pthread_once() calls callback on "
                "different thread?!  Cannot continue.\n");
        abort();
    }
    once_callback->fn(once_callback->data);
}
#endif

/**
 * Call func once and only once
 *
 * @param once pointer to a heim_base_once_t
 * @param ctx context passed to func
 * @param func function to be called
 */

void
heim_base_once_f(heim_base_once_t *once, void *ctx, void (*func)(void *))
{
#if defined(WIN32)
    /*
     * With a libroken wrapper for some CAS function and a libroken yield()
     * wrapper we could make this the default implementation when we have
     * neither Grand Central nor POSIX threads.
     *
     * We could also adapt the double-checked lock pattern with CAS
     * providing the necessary memory barriers in the absence of
     * portable explicit memory barrier APIs.
     */
    /*
     * We use CAS operations in large part to provide implied memory
     * barriers.
     *
     * State 0 means that func() has never executed.
     * State 1 means that func() is executing.
     * State 2 means that func() has completed execution.
     */
    if (InterlockedCompareExchange(once, 1L, 0L) == 0L) {
        /* State is now 1 */
        (*func)(ctx);
        (void)InterlockedExchange(once, 2L);
        /* State is now 2 */
    } else {
        /*
         * The InterlockedCompareExchange is being used to fetch
         * the current state under a full memory barrier.  As long
         * as the current state is 1 continue to spin.
         */
        while (InterlockedCompareExchange(once, 2L, 0L) == 1L)
            SwitchToThread();
    }
#elif defined(HAVE_DISPATCH_DISPATCH_H)
    dispatch_once_f(once, ctx, func);
#elif defined(ENABLE_PTHREAD_SUPPORT)
    struct once_callback once_callback;

    once_callback.fn = func;
    once_callback.data = ctx;

    errno = pthread_once(&once_arg_key_once, once_arg_key_once_init);
    if (errno != 0) {
        fprintf(stderr, "Error: pthread_once() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
    errno = pthread_setspecific(once_arg_key, &once_callback);
    if (errno != 0) {
        fprintf(stderr,
                "Error: pthread_setspecific() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
    errno = pthread_once(once, once_callback_caller);
    if (errno != 0) {
        fprintf(stderr, "Error: pthread_once() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
#else
    static HEIMDAL_MUTEX mutex = HEIMDAL_MUTEX_INITIALIZER;
    HEIMDAL_MUTEX_lock(&mutex);
    if (*once == 0) {
        *once = 1;
        HEIMDAL_MUTEX_unlock(&mutex);
        func(ctx);
        HEIMDAL_MUTEX_lock(&mutex);
        *once = 2;
        HEIMDAL_MUTEX_unlock(&mutex);
    } else if (*once == 2) {
        HEIMDAL_MUTEX_unlock(&mutex);
    } else {
        HEIMDAL_MUTEX_unlock(&mutex);
        while (1) {
            struct timeval tv = { 0, 1000 };
            select(0, NULL, NULL, NULL, &tv);
            HEIMDAL_MUTEX_lock(&mutex);
            if (*once == 2)
                break;
            HEIMDAL_MUTEX_unlock(&mutex);
        }
        HEIMDAL_MUTEX_unlock(&mutex);
    }
#endif
}
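
/*
 * Usage sketch (illustrative only): running a one-time initializer exactly
 * once across threads.  The names module_once, module_init() and
 * module_call() are hypothetical.
 *
 *	static heim_base_once_t module_once = HEIM_BASE_ONCE_INIT;
 *
 *	static void
 *	module_init(void *ctx)
 *	{
 *	    ... one-time setup, ctx is the second argument below ...
 *	}
 *
 *	void
 *	module_call(void)
 *	{
 *	    heim_base_once_f(&module_once, NULL, module_init);
 *	    ... module_init() has completed exactly once at this point ...
 *	}
 */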

/**
 * Abort and log the failure (using syslog)
 */

void
heim_abort(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    heim_abortv(fmt, ap);
    va_end(ap);
}

/**
 * Abort and log the failure (using syslog)
 */

void
heim_abortv(const char *fmt, va_list ap)
{
    static char str[1024];

    vsnprintf(str, sizeof(str), fmt, ap);
    syslog(LOG_ERR, "heim_abort: %s", str);
    abort();
}

/*
 *
 */

static int ar_created = 0;
static HEIMDAL_thread_key ar_key;

struct ar_tls {
    struct heim_auto_release *head;
    struct heim_auto_release *current;
    HEIMDAL_MUTEX tls_mutex;
};

static void
ar_tls_delete(void *ptr)
{
    struct ar_tls *tls = ptr;
    heim_auto_release_t next = NULL;

    if (tls == NULL)
        return;
    for (; tls->current != NULL; tls->current = next) {
        next = tls->current->parent;
        heim_release(tls->current);
    }
    free(tls);
}

static void
init_ar_tls(void *ptr)
{
    int ret;
    HEIMDAL_key_create(&ar_key, ar_tls_delete, ret);
    if (ret == 0)
        ar_created = 1;
}

static struct ar_tls *
autorel_tls(void)
{
    static heim_base_once_t once = HEIM_BASE_ONCE_INIT;
    struct ar_tls *arp;
    int ret;

    heim_base_once_f(&once, NULL, init_ar_tls);
    if (!ar_created)
        return NULL;

    arp = HEIMDAL_getspecific(ar_key);
    if (arp == NULL) {

        arp = calloc(1, sizeof(*arp));
        if (arp == NULL)
            return NULL;
        HEIMDAL_setspecific(ar_key, arp, ret);
        if (ret) {
            free(arp);
            return NULL;
        }
    }
    return arp;

}

static void
autorel_dealloc(void *ptr)
{
    heim_auto_release_t ar = ptr;
    struct ar_tls *tls;

    tls = autorel_tls();
    if (tls == NULL)
        heim_abort("autorelease pool released on thread w/o autorelease inited");

    heim_auto_release_drain(ar);

    if (!HEIM_TAILQ_EMPTY(&ar->pool))
        heim_abort("pool not empty after draining");

    HEIMDAL_MUTEX_lock(&tls->tls_mutex);
    if (tls->current != ptr)
        heim_abort("autorelease not releasing top pool");

    tls->current = ar->parent;
    HEIMDAL_MUTEX_unlock(&tls->tls_mutex);
}

static int
autorel_cmp(void *a, void *b)
{
    return (a == b);
}

static unsigned long
autorel_hash(void *ptr)
{
    return (unsigned long)ptr;
}


static struct heim_type_data _heim_autorel_object = {
    HEIM_TID_AUTORELEASE,
    "autorelease-pool",
    NULL,
    autorel_dealloc,
    NULL,
    autorel_cmp,
    autorel_hash,
    NULL
};

/**
 * Create thread-specific object auto-release pool
 *
 * Objects placed on the per-thread auto-release pool (with
 * heim_auto_release()) can be released in one fell swoop by calling
 * heim_auto_release_drain().
 */

heim_auto_release_t
heim_auto_release_create(void)
{
    struct ar_tls *tls = autorel_tls();
    heim_auto_release_t ar;

    if (tls == NULL)
        heim_abort("Failed to create/get autorelease head");

    ar = _heim_alloc_object(&_heim_autorel_object, sizeof(struct heim_auto_release));
    if (ar) {
        HEIMDAL_MUTEX_lock(&tls->tls_mutex);
        if (tls->head == NULL)
            tls->head = ar;
        ar->parent = tls->current;
        tls->current = ar;
        HEIMDAL_MUTEX_unlock(&tls->tls_mutex);
    }

    return ar;
}

/**
 * Place the current object on the thread's auto-release pool
 *
 * @param ptr object
 */

heim_object_t
heim_auto_release(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);
    struct ar_tls *tls = autorel_tls();
    heim_auto_release_t ar;

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return ptr;

    /* drop from old pool */
    if ((ar = p->autorelpool) != NULL) {
        HEIMDAL_MUTEX_lock(&ar->pool_mutex);
        HEIM_TAILQ_REMOVE(&ar->pool, p, autorel);
        p->autorelpool = NULL;
        HEIMDAL_MUTEX_unlock(&ar->pool_mutex);
    }

    if (tls == NULL || (ar = tls->current) == NULL)
        heim_abort("no autorelease pool in place, would leak");

    HEIMDAL_MUTEX_lock(&ar->pool_mutex);
    HEIM_TAILQ_INSERT_HEAD(&ar->pool, p, autorel);
    p->autorelpool = ar;
    HEIMDAL_MUTEX_unlock(&ar->pool_mutex);

    return ptr;
}

/**
 * Release all objects on the given auto-release pool
 */

void
heim_auto_release_drain(heim_auto_release_t autorel)
{
    heim_object_t obj;

    /* release all elements on the tail queue */

    HEIMDAL_MUTEX_lock(&autorel->pool_mutex);
    while (!HEIM_TAILQ_EMPTY(&autorel->pool)) {
        obj = HEIM_TAILQ_FIRST(&autorel->pool);
        HEIMDAL_MUTEX_unlock(&autorel->pool_mutex);
        heim_release(BASE2PTR(obj));
        HEIMDAL_MUTEX_lock(&autorel->pool_mutex);
    }
    HEIMDAL_MUTEX_unlock(&autorel->pool_mutex);
}
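
/*
 * Usage sketch (illustrative only): a per-thread pool is created, objects
 * are parked on it with heim_auto_release(), and everything is released in
 * one sweep.
 *
 *	heim_auto_release_t pool = heim_auto_release_create();
 *
 *	heim_string_t s = heim_auto_release(heim_string_create("temporary"));
 *	... use s without an explicit heim_release() ...
 *
 *	heim_auto_release_drain(pool);	releases s and anything else pooled
 *	heim_release(pool);		releases the pool itself
 */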

/*
 * Helper for heim_path_vget() and heim_path_delete().  On success
 * outputs the node named by the path and the parent node and key
 * (useful for heim_path_delete()).
 */

static heim_object_t
heim_path_vget2(heim_object_t ptr, heim_object_t *parent, heim_object_t *key,
                heim_error_t *error, va_list ap)
{
    heim_object_t path_element;
    heim_object_t node, next_node;
    heim_tid_t node_type;

    *parent = NULL;
    *key = NULL;
    if (ptr == NULL)
        return NULL;

    for (node = ptr; node != NULL; ) {
        path_element = va_arg(ap, heim_object_t);
        if (path_element == NULL) {
            *parent = node;
            *key = path_element;
            return node;
        }

        node_type = heim_get_tid(node);
        switch (node_type) {
        case HEIM_TID_ARRAY:
        case HEIM_TID_DICT:
        case HEIM_TID_DB:
            break;
        default:
            if (node == ptr)
                heim_abort("heim_path_get() only operates on container types");
            return NULL;
        }

        if (node_type == HEIM_TID_DICT) {
            next_node = heim_dict_get_value(node, path_element);
        } else if (node_type == HEIM_TID_DB) {
            next_node = _heim_db_get_value(node, NULL, path_element, NULL);
        } else if (node_type == HEIM_TID_ARRAY) {
            int idx = -1;

            if (heim_get_tid(path_element) == HEIM_TID_NUMBER)
                idx = heim_number_get_int(path_element);
            if (idx < 0) {
                if (error)
                    *error = heim_error_create(EINVAL,
                                               "heim_path_get() path elements "
                                               "for array nodes must be "
                                               "numeric and positive");
                return NULL;
            }
            next_node = heim_array_get_value(node, idx);
        } else {
            if (error)
                *error = heim_error_create(EINVAL,
                                           "heim_path_get() node in path "
                                           "not a container type");
            return NULL;
        }
        node = next_node;
    }
    return NULL;
}

/**
 * Get a node in a heim_object tree by path
 *
 * @param ptr tree
 * @param error error (output)
 * @param ap NULL-terminated va_list of heim_object_ts that form a path
 *
 * @return object (not retained) if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_vget(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t p, k;

    return heim_path_vget2(ptr, &p, &k, error, ap);
}

/**
 * Get a node in a tree by path, with retained reference
 *
 * @param ptr tree
 * @param error error (output)
 * @param ap NULL-terminated va_list of heim_object_ts that form a path
 *
 * @return retained object if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_vcopy(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t p, k;

    return heim_retain(heim_path_vget2(ptr, &p, &k, error, ap));
}

/**
 * Get a node in a tree by path
 *
 * @param ptr tree
 * @param error error (output)
 * @param ... NULL-terminated list of heim_object_ts that form a path
 *
 * @return object (not retained) if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_get(heim_object_t ptr, heim_error_t *error, ...)
{
    heim_object_t o;
    heim_object_t p, k;
    va_list ap;

    if (ptr == NULL)
        return NULL;

    va_start(ap, error);
    o = heim_path_vget2(ptr, &p, &k, error, ap);
    va_end(ap);
    return o;
}

/**
 * Get a node in a tree by path, with retained reference
 *
 * @param ptr tree
 * @param error error (output)
 * @param ... NULL-terminated list of heim_object_ts that form a path
 *
 * @return retained object if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_copy(heim_object_t ptr, heim_error_t *error, ...)
{
    heim_object_t o;
    heim_object_t p, k;
    va_list ap;

    if (ptr == NULL)
        return NULL;

    va_start(ap, error);
    o = heim_retain(heim_path_vget2(ptr, &p, &k, error, ap));
    va_end(ap);
    return o;
}

/**
 * Create a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param size the size of the heim_dict_t nodes to be created
 * @param leaf leaf node to be added, if any
 * @param error error (output)
 * @param ap NULL-terminated list of path component objects
 *
 * Create a path of heim_dict_t interior nodes in a given heim_object_t
 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL
 * then the leaf is not deleted).
 *
 * @return 0 on success, else a system error
 *
 * @addtogroup heimbase
 */

int
heim_path_vcreate(heim_object_t ptr, size_t size, heim_object_t leaf,
                  heim_error_t *error, va_list ap)
{
    heim_object_t path_element = va_arg(ap, heim_object_t);
    heim_object_t next_path_element = NULL;
    heim_object_t node = ptr;
    heim_object_t next_node = NULL;
    heim_tid_t node_type;
    int ret = 0;

    if (ptr == NULL)
        heim_abort("heim_path_vcreate() does not create root nodes");

    while (path_element != NULL) {
        next_path_element = va_arg(ap, heim_object_t);
        node_type = heim_get_tid(node);

        if (node_type == HEIM_TID_DICT) {
            next_node = heim_dict_get_value(node, path_element);
        } else if (node_type == HEIM_TID_ARRAY) {
            int idx = -1;

            if (heim_get_tid(path_element) == HEIM_TID_NUMBER)
                idx = heim_number_get_int(path_element);
            if (idx < 0) {
                if (error)
                    *error = heim_error_create(EINVAL,
                                               "heim_path() path elements for "
                                               "array nodes must be numeric "
                                               "and positive");
                return EINVAL;
            }
            if (idx < heim_array_get_length(node))
                next_node = heim_array_get_value(node, idx);
            else
                next_node = NULL;
        } else if (node_type == HEIM_TID_DB && next_path_element != NULL) {
            if (error)
                *error = heim_error_create(EINVAL, "Interior node is a DB");
            return EINVAL;
        }

        if (next_path_element == NULL)
            break;

        /* Create missing interior node */
        if (next_node == NULL) {
            next_node = heim_dict_create(size); /* no arrays or DBs, just dicts */
            if (next_node == NULL) {
                ret = ENOMEM;
                goto err;
            }

            if (node_type == HEIM_TID_DICT) {
                ret = heim_dict_set_value(node, path_element, next_node);
            } else if (node_type == HEIM_TID_ARRAY &&
                heim_number_get_int(path_element) <= heim_array_get_length(node)) {
                ret = heim_array_insert_value(node,
                                              heim_number_get_int(path_element),
                                              next_node);
            } else {
                ret = EINVAL;
                if (error)
                    *error = heim_error_create(ret, "Node in path not a "
                                               "container");
            }
            heim_release(next_node);
            if (ret)
                goto err;
        }

        path_element = next_path_element;
        node = next_node;
        next_node = NULL;
    }

    if (path_element == NULL)
        goto err;

    /* Add the leaf */
    if (leaf != NULL) {
        if (node_type == HEIM_TID_DICT)
            ret = heim_dict_set_value(node, path_element, leaf);
        else
            ret = heim_array_insert_value(node,
                                          heim_number_get_int(path_element),
                                          leaf);
    }
    return ret;

err:
    if (error && !*error) {
        if (ret == ENOMEM)
            *error = heim_error_create_enomem();
        else
            *error = heim_error_create(ret, "Could not set "
                                       "dict value");
    }
    return ret;
}

/**
 * Create a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param size the size of the heim_dict_t nodes to be created
 * @param leaf leaf node to be added, if any
 * @param error error (output)
 * @param ... NULL-terminated list of path component objects
 *
 * Create a path of heim_dict_t interior nodes in a given heim_object_t
 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL
 * then the leaf is not deleted).
 *
 * @return 0 on success, else a system error
 *
 * @addtogroup heimbase
 */

int
heim_path_create(heim_object_t ptr, size_t size, heim_object_t leaf,
                 heim_error_t *error, ...)
{
    va_list ap;
    int ret;

    va_start(ap, error);
    ret = heim_path_vcreate(ptr, size, leaf, error, ap);
    va_end(ap);
    return ret;
}
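
/*
 * Usage sketch (illustrative only): building a path of interior dicts under
 * a root dict, reading the leaf back, and deleting it.  Error handling and
 * releases of the keys are omitted for brevity.
 *
 *	heim_dict_t root = heim_dict_create(11);
 *	heim_string_t k1 = heim_string_create("servers");
 *	heim_string_t k2 = heim_string_create("kdc");
 *	heim_number_t port = heim_number_create(88);
 *
 *	heim_path_create(root, 11, port, NULL, k1, k2, NULL);
 *	heim_object_t leaf = heim_path_get(root, NULL, k1, k2, NULL);
 *	leaf is now the number 88 (not retained)
 *
 *	heim_path_delete(root, NULL, k1, k2, NULL);
 */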

/**
 * Delete leaf node named by a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ap NULL-terminated list of path component objects
 *
 * @addtogroup heimbase
 */

void
heim_path_vdelete(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t parent, key, child;

    child = heim_path_vget2(ptr, &parent, &key, error, ap);
    if (child != NULL) {
        if (heim_get_tid(parent) == HEIM_TID_DICT)
            heim_dict_delete_key(parent, key);
        else if (heim_get_tid(parent) == HEIM_TID_DB)
            heim_db_delete_key(parent, NULL, key, error);
        else if (heim_get_tid(parent) == HEIM_TID_ARRAY)
            heim_array_delete_value(parent, heim_number_get_int(key));
        heim_release(child);
    }
}

/**
 * Delete leaf node named by a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ... NULL-terminated list of path component objects
 *
 * @addtogroup heimbase
 */

void
heim_path_delete(heim_object_t ptr, heim_error_t *error, ...)
{
    va_list ap;

    va_start(ap, error);
    heim_path_vdelete(ptr, error, ap);
    va_end(ap);
    return;
}
