/*
 * util/alloc.c - memory allocation service.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains memory allocation functions.
 */

#include "config.h"
#include "util/alloc.h"
#include "util/regional.h"
#include "util/data/packed_rrset.h"
#include "util/fptr_wlist.h"

/** custom size of cached regional blocks */
#define ALLOC_REG_SIZE	16384
/** number of bits for ID part of uint64, rest for number of threads. */
#define THRNUM_SHIFT	48	/* for 65k threads, 2^48 rrsets per thr. */
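
/*
 * Illustration (not part of the build): with THRNUM_SHIFT at 48, a 64-bit
 * rrset id is split into [16 bits thread number | 48 bits sequence number].
 * Thread 3, for example, starts handing out ids at
 * ((uint64_t)3 << THRNUM_SHIFT) + 1 (id 0 is reserved), and alloc_get_id()
 * wraps and calls the cleanup callback once the sequence reaches
 * ((uint64_t)3 << THRNUM_SHIFT) | (((uint64_t)1 << THRNUM_SHIFT) - 1).
 * See alloc_init() and alloc_get_id() below.
 */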

/** setup new special type */
static void
alloc_setup_special(alloc_special_type* t)
{
	memset(t, 0, sizeof(*t));
	lock_rw_init(&t->entry.lock);
	t->entry.key = t;
}

/** prealloc some entries in the cache. To minimize contention.
 * Result is 1 lock per alloc_max newly created entries.
 * @param alloc: the structure to fill up.
 */
static void
prealloc_setup(struct alloc_cache* alloc)
{
	alloc_special_type* p;
	int i;
	for(i=0; i<ALLOC_SPECIAL_MAX; i++) {
		if(!(p = (alloc_special_type*)malloc(
			sizeof(alloc_special_type)))) {
			log_err("prealloc: out of memory");
			return;
		}
		alloc_setup_special(p);
		alloc_set_special_next(p, alloc->quar);
		alloc->quar = p;
		alloc->num_quar++;
	}
}

/** prealloc region blocks */
static void
prealloc_blocks(struct alloc_cache* alloc, size_t num)
{
	size_t i;
	struct regional* r;
	for(i=0; i<num; i++) {
		r = regional_create_custom(ALLOC_REG_SIZE);
		if(!r) {
			log_err("prealloc blocks: out of memory");
			return;
		}
		r->next = (char*)alloc->reg_list;
		alloc->reg_list = r;
		alloc->num_reg_blocks ++;
	}
}

void
alloc_init(struct alloc_cache* alloc, struct alloc_cache* super,
	int thread_num)
{
	memset(alloc, 0, sizeof(*alloc));
	alloc->super = super;
	alloc->thread_num = thread_num;
	alloc->next_id = (uint64_t)thread_num; 	/* in steps, so that type */
	alloc->next_id <<= THRNUM_SHIFT; 	/* of *_id is used. */
	alloc->last_id = 1; 			/* so no 64bit constants, */
	alloc->last_id <<= THRNUM_SHIFT; 	/* or implicit 'int' ops. */
	alloc->last_id -= 1; 			/* for compiler portability. */
	alloc->last_id |= alloc->next_id;
	alloc->next_id += 1;			/* because id=0 is special. */
	alloc->max_reg_blocks = 100;
	alloc->num_reg_blocks = 0;
	alloc->reg_list = NULL;
	alloc->cleanup = NULL;
	alloc->cleanup_arg = NULL;
	if(alloc->super)
		prealloc_blocks(alloc, alloc->max_reg_blocks);
	if(!alloc->super) {
		lock_quick_init(&alloc->lock);
		lock_protect(&alloc->lock, alloc, sizeof(*alloc));
	}
}
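
/*
 * Usage sketch (illustrative only; the variable names are made up): a daemon
 * typically creates one shared "super" cache and one per-thread cache that
 * delegates to it:
 *
 *	struct alloc_cache superalloc, worker_alloc;
 *	alloc_init(&superalloc, NULL, 0);          // shared, gets its own lock
 *	alloc_init(&worker_alloc, &superalloc, 1); // for thread number 1
 *	...
 *	alloc_clear(&worker_alloc);  // returns cached entries to the super
 *	alloc_clear(&superalloc);
 */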

/** free the special list */
static void
alloc_clear_special_list(struct alloc_cache* alloc)
{
	alloc_special_type* p, *np;
	/* free */
	p = alloc->quar;
	while(p) {
		np = alloc_special_next(p);
		/* deinit special type */
		lock_rw_destroy(&p->entry.lock);
		free(p);
		p = np;
	}
}

void
alloc_clear_special(struct alloc_cache* alloc)
{
	if(!alloc->super) {
		lock_quick_lock(&alloc->lock);
	}
	alloc_clear_special_list(alloc);
	alloc->quar = 0;
	alloc->num_quar = 0;
	if(!alloc->super) {
		lock_quick_unlock(&alloc->lock);
	}
}

void
alloc_clear(struct alloc_cache* alloc)
{
	alloc_special_type* p;
	struct regional* r, *nr;
	if(!alloc)
		return;
	if(!alloc->super) {
		lock_quick_destroy(&alloc->lock);
	}
	if(alloc->super && alloc->quar) {
		/* push entire list into super */
		p = alloc->quar;
		while(alloc_special_next(p)) /* find last */
			p = alloc_special_next(p);
		lock_quick_lock(&alloc->super->lock);
		alloc_set_special_next(p, alloc->super->quar);
		alloc->super->quar = alloc->quar;
		alloc->super->num_quar += alloc->num_quar;
		lock_quick_unlock(&alloc->super->lock);
	} else {
		alloc_clear_special_list(alloc);
	}
	alloc->quar = 0;
	alloc->num_quar = 0;
	r = alloc->reg_list;
	while(r) {
		nr = (struct regional*)r->next;
		free(r);
		r = nr;
	}
	alloc->reg_list = NULL;
	alloc->num_reg_blocks = 0;
}

uint64_t
alloc_get_id(struct alloc_cache* alloc)
{
	uint64_t id = alloc->next_id++;
	if(id == alloc->last_id) {
		log_warn("rrset alloc: out of 64bit ids. Clearing cache.");
		fptr_ok(fptr_whitelist_alloc_cleanup(alloc->cleanup));
		(*alloc->cleanup)(alloc->cleanup_arg);

		/* start back at first number */   	/* like in alloc_init*/
		alloc->next_id = (uint64_t)alloc->thread_num;
		alloc->next_id <<= THRNUM_SHIFT; 	/* in steps for comp. */
		alloc->next_id += 1;			/* portability. */
		/* and generate new and safe id */
		id = alloc->next_id++;
	}
	return id;
}

alloc_special_type*
alloc_special_obtain(struct alloc_cache* alloc)
{
	alloc_special_type* p;
	log_assert(alloc);
	/* see if in local cache */
	if(alloc->quar) {
		p = alloc->quar;
		alloc->quar = alloc_special_next(p);
		alloc->num_quar--;
		p->id = alloc_get_id(alloc);
		return p;
	}
	/* see if in global cache */
	if(alloc->super) {
		/* could maybe grab alloc_max/2 entries in one go,
		 * but really, isn't that just as fast as this code? */
		lock_quick_lock(&alloc->super->lock);
		if((p = alloc->super->quar)) {
			alloc->super->quar = alloc_special_next(p);
			alloc->super->num_quar--;
		}
		lock_quick_unlock(&alloc->super->lock);
		if(p) {
			p->id = alloc_get_id(alloc);
			return p;
		}
	}
	/* allocate new */
	prealloc_setup(alloc);
	if(!(p = (alloc_special_type*)malloc(sizeof(alloc_special_type)))) {
		log_err("alloc_special_obtain: out of memory");
		return NULL;
	}
	alloc_setup_special(p);
	p->id = alloc_get_id(alloc);
	return p;
}

/** push mem and some more items to the super */
static void
pushintosuper(struct alloc_cache* alloc, alloc_special_type* mem)
{
	int i;
	alloc_special_type *p = alloc->quar;
	log_assert(p);
	log_assert(alloc && alloc->super &&
		alloc->num_quar >= ALLOC_SPECIAL_MAX);
	/* push ALLOC_SPECIAL_MAX/2 after mem */
	alloc_set_special_next(mem, alloc->quar);
	for(i=1; i<ALLOC_SPECIAL_MAX/2; i++) {
		p = alloc_special_next(p);
	}
	alloc->quar = alloc_special_next(p);
	alloc->num_quar -= ALLOC_SPECIAL_MAX/2;

	/* dump mem+list into the super quar list */
	lock_quick_lock(&alloc->super->lock);
	alloc_set_special_next(p, alloc->super->quar);
	alloc->super->quar = mem;
	alloc->super->num_quar += ALLOC_SPECIAL_MAX/2 + 1;
	lock_quick_unlock(&alloc->super->lock);
	/* so 1 lock per mem+alloc/2 deletes */
}

void
alloc_special_release(struct alloc_cache* alloc, alloc_special_type* mem)
{
	log_assert(alloc);
	if(!mem)
		return;
	if(!alloc->super) {
		lock_quick_lock(&alloc->lock); /* superalloc needs locking */
	}

	alloc_special_clean(mem);
	if(alloc->super && alloc->num_quar >= ALLOC_SPECIAL_MAX) {
		/* push it to the super structure */
		pushintosuper(alloc, mem);
		return;
	}

	alloc_set_special_next(mem, alloc->quar);
	alloc->quar = mem;
	alloc->num_quar++;
	if(!alloc->super) {
		lock_quick_unlock(&alloc->lock);
	}
}
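
/*
 * Usage sketch (illustrative only; worker_alloc is a made-up name): a worker
 * obtains an rrset key structure from its own cache, uses it, and hands it
 * back when done:
 *
 *	alloc_special_type* k = alloc_special_obtain(&worker_alloc);
 *	if(k) {
 *		... fill in the rrset data and use it ...
 *		alloc_special_release(&worker_alloc, k);
 *	}
 *
 * The shared superalloc lock is only touched when the local list is empty
 * (obtain) or has grown past ALLOC_SPECIAL_MAX (release), keeping contention
 * between threads low.
 */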

void
alloc_stats(struct alloc_cache* alloc)
{
	log_info("%salloc: %d in cache, %d blocks.", alloc->super?"":"sup",
		(int)alloc->num_quar, (int)alloc->num_reg_blocks);
}

size_t alloc_get_mem(struct alloc_cache* alloc)
{
	alloc_special_type* p;
	size_t s = sizeof(*alloc);
	if(!alloc->super) {
		lock_quick_lock(&alloc->lock); /* superalloc needs locking */
	}
	s += sizeof(alloc_special_type) * alloc->num_quar;
	for(p = alloc->quar; p; p = alloc_special_next(p)) {
		s += lock_get_mem(&p->entry.lock);
	}
	s += alloc->num_reg_blocks * ALLOC_REG_SIZE;
	if(!alloc->super) {
		lock_quick_unlock(&alloc->lock);
	}
	return s;
}

struct regional*
alloc_reg_obtain(struct alloc_cache* alloc)
{
	if(alloc->num_reg_blocks > 0) {
		struct regional* r = alloc->reg_list;
		alloc->reg_list = (struct regional*)r->next;
		r->next = NULL;
		alloc->num_reg_blocks--;
		return r;
	}
	return regional_create_custom(ALLOC_REG_SIZE);
}

void
alloc_reg_release(struct alloc_cache* alloc, struct regional* r)
{
	if(alloc->num_reg_blocks >= alloc->max_reg_blocks) {
		regional_destroy(r);
		return;
	}
	if(!r) return;
	regional_free_all(r);
	log_assert(r->next == NULL);
	r->next = (char*)alloc->reg_list;
	alloc->reg_list = r;
	alloc->num_reg_blocks++;
}
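
/*
 * Usage sketch (illustrative only; worker_alloc is a made-up name): scratch
 * regions for query processing come from the per-thread cache, so that
 * ALLOC_REG_SIZE blocks are reused instead of malloc'ed and free'd per query:
 *
 *	struct regional* scratch = alloc_reg_obtain(&worker_alloc);
 *	if(scratch) {
 *		void* tmp = regional_alloc(scratch, 512);
 *		... use tmp while handling one query ...
 *		alloc_reg_release(&worker_alloc, scratch);
 *	}
 *
 * Note that alloc_reg_obtain() and alloc_reg_release() take no locks; the
 * caller is expected to use its own alloc_cache.
 */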

void
alloc_set_id_cleanup(struct alloc_cache* alloc, void (*cleanup)(void*),
        void* arg)
{
	alloc->cleanup = cleanup;
	alloc->cleanup_arg = arg;
}
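
/*
 * Sketch of the id-wraparound hook (illustrative only; the names below are
 * made up, and the real callback must also be present in the
 * fptr_whitelist_alloc_cleanup() whitelist checked by alloc_get_id()):
 *
 *	static void my_clear_rrset_cache(void* arg)
 *	{
 *		... drop cached rrsets that still carry old ids ...
 *	}
 *	alloc_set_id_cleanup(&worker_alloc, &my_clear_rrset_cache, module_env);
 *
 * alloc_get_id() invokes the callback when a thread exhausts its 48-bit id
 * space, so stale ids cannot be handed out twice.
 */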

/** global debug value to keep track of total memory mallocs */
size_t unbound_mem_alloc = 0;
/** global debug value to keep track of total memory frees */
size_t unbound_mem_freed = 0;
#ifdef UNBOUND_ALLOC_STATS
/** special value to know if the memory is being tracked */
uint64_t mem_special = (uint64_t)0xfeed43327766abcdLL;
#ifdef malloc
#undef malloc
#endif
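/*
 * The stats wrappers below prepend a 16-byte header to every allocation:
 *
 *	[ size (first 8 bytes) | mem_special magic (next 8 bytes) | user data ]
 *
 * The magic value lets unbound_stat_free() and unbound_stat_realloc()
 * recognize pointers that were not handed out by these wrappers and pass
 * them straight through to the system allocator.
 */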
/** malloc with stats */
void *unbound_stat_malloc(size_t size)
{
	void* res;
	if(size == 0) size = 1;
	log_assert(size <= SIZE_MAX-16);
	res = malloc(size+16);
	if(!res) return NULL;
	unbound_mem_alloc += size;
	log_info("stat %p=malloc(%u)", res+16, (unsigned)size);
	memcpy(res, &size, sizeof(size));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
#ifdef calloc
#undef calloc
#endif
#ifndef INT_MAX
#define INT_MAX (((int)-1)>>1)
#endif
/** calloc with stats */
void *unbound_stat_calloc(size_t nmemb, size_t size)
{
	size_t s;
	void* res;
	if(nmemb != 0 && INT_MAX/nmemb < size)
		return NULL; /* integer overflow check */
	s = (nmemb*size==0)?(size_t)1:nmemb*size;
	log_assert(s <= SIZE_MAX-16);
	res = calloc(1, s+16);
	if(!res) return NULL;
	log_info("stat %p=calloc(%u, %u)", res+16, (unsigned)nmemb, (unsigned)size);
	unbound_mem_alloc += s;
	memcpy(res, &s, sizeof(s));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
#ifdef free
#undef free
#endif
/** free with stats */
void unbound_stat_free(void *ptr)
{
	size_t s;
	if(!ptr) return;
	if(memcmp(ptr-8, &mem_special, sizeof(mem_special)) != 0) {
		free(ptr);
		return;
	}
	ptr-=16;
	memcpy(&s, ptr, sizeof(s));
	log_info("stat free(%p) size %u", ptr+16, (unsigned)s);
	memset(ptr+8, 0, 8);
	unbound_mem_freed += s;
	free(ptr);
}
#ifdef realloc
#undef realloc
#endif
/** realloc with stats */
void *unbound_stat_realloc(void *ptr, size_t size)
{
	size_t cursz;
	void* res;
	if(!ptr) return unbound_stat_malloc(size);
	if(memcmp(ptr-8, &mem_special, sizeof(mem_special)) != 0) {
		return realloc(ptr, size);
	}
	if(size==0) {
		unbound_stat_free(ptr);
		return NULL;
	}
	ptr -= 16;
	memcpy(&cursz, ptr, sizeof(cursz));
	if(cursz == size) {
		/* nothing changes; return the user pointer, not the start
		 * of the tracking header */
		return ptr+16;
	}
	log_assert(size <= SIZE_MAX-16);
	res = malloc(size+16);
	if(!res) return NULL;
	unbound_mem_alloc += size;
	unbound_mem_freed += cursz;
	log_info("stat realloc(%p, %u) from %u", ptr+16, (unsigned)size, (unsigned)cursz);
	if(cursz > size) {
		memcpy(res+16, ptr+16, size);
	} else if(size > cursz) {
		memcpy(res+16, ptr+16, cursz);
	}
	memset(ptr+8, 0, 8);
	free(ptr);
	memcpy(res, &size, sizeof(size));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}

/** log to file where alloc was done */
void *unbound_stat_malloc_log(size_t size, const char* file, int line,
        const char* func)
{
	log_info("%s:%d %s malloc(%u)", file, line, func, (unsigned)size);
	return unbound_stat_malloc(size);
}

/** log to file where alloc was done */
void *unbound_stat_calloc_log(size_t nmemb, size_t size, const char* file,
        int line, const char* func)
{
	log_info("%s:%d %s calloc(%u, %u)", file, line, func,
		(unsigned) nmemb, (unsigned)size);
	return unbound_stat_calloc(nmemb, size);
}

/** log to file where free was done */
void unbound_stat_free_log(void *ptr, const char* file, int line,
        const char* func)
{
	if(ptr && memcmp(ptr-8, &mem_special, sizeof(mem_special)) == 0) {
		size_t s;
		memcpy(&s, ptr-16, sizeof(s));
		log_info("%s:%d %s free(%p) size %u",
			file, line, func, ptr, (unsigned)s);
	} else
		log_info("%s:%d %s unmatched free(%p)", file, line, func, ptr);
	unbound_stat_free(ptr);
}

/** log to file where alloc was done */
void *unbound_stat_realloc_log(void *ptr, size_t size, const char* file,
        int line, const char* func)
{
	log_info("%s:%d %s realloc(%p, %u)", file, line, func,
		ptr, (unsigned)size);
	return unbound_stat_realloc(ptr, size);
}

#endif /* UNBOUND_ALLOC_STATS */
#ifdef UNBOUND_ALLOC_LITE
#undef malloc
#undef calloc
#undef free
#undef realloc
/** length of prefix and suffix */
static size_t lite_pad = 16;
/** prefix value to check */
static char* lite_pre = "checkfront123456";
/** suffix value to check */
static char* lite_post= "checkafter123456";
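
/*
 * The "lite" checking allocator below brackets every allocation with known
 * byte patterns so that overruns are caught when the block is freed:
 *
 *	[ lite_pad bytes lite_pre | size_t length | user data | lite_pad bytes lite_post ]
 *
 * unbound_stat_free_lite() and unbound_stat_realloc_lite() verify both pads
 * and fatal_exit() on a mismatch.
 */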

void *unbound_stat_malloc_lite(size_t size, const char* file, int line,
        const char* func)
{
	/*  [prefix .. len .. actual data .. suffix] */
	void* res;
	log_assert(size <= SIZE_MAX-(lite_pad*2+sizeof(size_t)));
	res = malloc(size+lite_pad*2+sizeof(size_t));
	if(!res) return NULL;
	memmove(res, lite_pre, lite_pad);
	memmove(res+lite_pad, &size, sizeof(size_t));
	memset(res+lite_pad+sizeof(size_t), 0x1a, size); /* init the memory */
	memmove(res+lite_pad+size+sizeof(size_t), lite_post, lite_pad);
	return res+lite_pad+sizeof(size_t);
}

void *unbound_stat_calloc_lite(size_t nmemb, size_t size, const char* file,
        int line, const char* func)
{
	size_t req;
	void* res;
	if(nmemb != 0 && INT_MAX/nmemb < size)
		return NULL; /* integer overflow check */
	req = nmemb * size;
	log_assert(req <= SIZE_MAX-(lite_pad*2+sizeof(size_t)));
	res = malloc(req+lite_pad*2+sizeof(size_t));
	if(!res) return NULL;
	memmove(res, lite_pre, lite_pad);
	memmove(res+lite_pad, &req, sizeof(size_t));
	memset(res+lite_pad+sizeof(size_t), 0, req);
	memmove(res+lite_pad+req+sizeof(size_t), lite_post, lite_pad);
	return res+lite_pad+sizeof(size_t);
}

void unbound_stat_free_lite(void *ptr, const char* file, int line,
        const char* func)
{
	void* real;
	size_t orig = 0;
	if(!ptr) return;
	real = ptr-lite_pad-sizeof(size_t);
	if(memcmp(real, lite_pre, lite_pad) != 0) {
		log_err("free(): prefix failed %s:%d %s", file, line, func);
		log_hex("prefix here", real, lite_pad);
		log_hex("  should be", lite_pre, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memmove(&orig, real+lite_pad, sizeof(size_t));
	if(memcmp(real+lite_pad+orig+sizeof(size_t), lite_post, lite_pad)!=0){
		log_err("free(): suffix failed %s:%d %s", file, line, func);
		log_err("alloc size is %d", (int)orig);
		log_hex("suffix here", real+lite_pad+orig+sizeof(size_t),
			lite_pad);
		log_hex("  should be", lite_post, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memset(real, 0xdd, orig+lite_pad*2+sizeof(size_t)); /* mark it */
	free(real);
}

void *unbound_stat_realloc_lite(void *ptr, size_t size, const char* file,
        int line, const char* func)
{
	/* always free and realloc (no growing) */
	void* real, *newa;
	size_t orig = 0;
	if(!ptr) {
		/* like malloc() */
		return unbound_stat_malloc_lite(size, file, line, func);
	}
	if(!size) {
		/* like free() */
		unbound_stat_free_lite(ptr, file, line, func);
		return NULL;
	}
	/* change allocation size and copy */
	real = ptr-lite_pad-sizeof(size_t);
	if(memcmp(real, lite_pre, lite_pad) != 0) {
		log_err("realloc(): prefix failed %s:%d %s", file, line, func);
		log_hex("prefix here", real, lite_pad);
		log_hex("  should be", lite_pre, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memmove(&orig, real+lite_pad, sizeof(size_t));
	if(memcmp(real+lite_pad+orig+sizeof(size_t), lite_post, lite_pad)!=0){
		log_err("realloc(): suffix failed %s:%d %s", file, line, func);
		log_err("alloc size is %d", (int)orig);
		log_hex("suffix here", real+lite_pad+orig+sizeof(size_t),
			lite_pad);
		log_hex("  should be", lite_post, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	/* new alloc and copy over */
	newa = unbound_stat_malloc_lite(size, file, line, func);
	if(!newa)
		return NULL;
	if(orig < size)
		memmove(newa, ptr, orig);
	else	memmove(newa, ptr, size);
	memset(real, 0xdd, orig+lite_pad*2+sizeof(size_t)); /* mark it */
	free(real);
	return newa;
}

char* unbound_strdup_lite(const char* s, const char* file, int line,
        const char* func)
{
	/* this routine is made to make sure strdup() uses the malloc_lite */
	size_t l = strlen(s)+1;
	char* n = (char*)unbound_stat_malloc_lite(l, file, line, func);
	if(!n) return NULL;
	memmove(n, s, l);
	return n;
}

char* unbound_lite_wrapstr(char* s)
{
	char* n = unbound_strdup_lite(s, __FILE__, __LINE__, __func__);
	free(s);
	return n;
}

#undef sldns_pkt2wire
sldns_status unbound_lite_pkt2wire(uint8_t **dest, const sldns_pkt *p,
	size_t *size)
{
	uint8_t* md = NULL;
	size_t ms = 0;
	sldns_status s = sldns_pkt2wire(&md, p, &ms);
	if(md) {
		*dest = unbound_stat_malloc_lite(ms, __FILE__, __LINE__,
			__func__);
		*size = ms;
		if(!*dest) { free(md); return LDNS_STATUS_MEM_ERR; }
		memcpy(*dest, md, ms);
		free(md);
	} else {
		*dest = NULL;
		*size = 0;
	}
	return s;
}

#undef i2d_DSA_SIG
int unbound_lite_i2d_DSA_SIG(DSA_SIG* dsasig, unsigned char** sig)
{
	unsigned char* n = NULL;
	int r= i2d_DSA_SIG(dsasig, &n);
	if(n) {
		*sig = unbound_stat_malloc_lite((size_t)r, __FILE__, __LINE__,
			__func__);
		if(!*sig) return -1;
		memcpy(*sig, n, (size_t)r);
		free(n);
		return r;
	}
	*sig = NULL;
	return r;
}

#endif /* UNBOUND_ALLOC_LITE */