alloc.c revision 1.2
/*
 * util/alloc.c - memory allocation service.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains memory allocation functions.
 */

#include "config.h"
#include "util/alloc.h"
#include "util/regional.h"
#include "util/data/packed_rrset.h"
#include "util/fptr_wlist.h"

/** custom size of cached regional blocks */
#define ALLOC_REG_SIZE	16384
/** number of bits for ID part of uint64, rest for number of threads. */
#define THRNUM_SHIFT	48	/* for 65k threads, 2^48 rrsets per thr. */
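/*
 * Illustration of the id layout implied by THRNUM_SHIFT above: the top
 * 16 bits of a 64-bit id hold the thread number, the low 48 bits count
 * rrsets handed out by that thread.  For example, thread 3 hands out ids
 * from (3ULL<<48)+1 up to (4ULL<<48)-1; id 0 is never used because it is
 * reserved as the special 'unassigned' value (see alloc_init below).
 */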

/** setup new special type */
static void
alloc_setup_special(alloc_special_type* t)
{
	memset(t, 0, sizeof(*t));
	lock_rw_init(&t->entry.lock);
	t->entry.key = t;
}
/** prealloc some entries in the cache, to minimize contention.
 * Result is 1 lock per ALLOC_SPECIAL_MAX newly created entries.
 * @param alloc: the structure to fill up.
 */
static void
prealloc_setup(struct alloc_cache* alloc)
{
	alloc_special_type* p;
	int i;
	for(i=0; i<ALLOC_SPECIAL_MAX; i++) {
		if(!(p = (alloc_special_type*)malloc(
			sizeof(alloc_special_type)))) {
			log_err("prealloc: out of memory");
			return;
		}
		alloc_setup_special(p);
		alloc_set_special_next(p, alloc->quar);
		alloc->quar = p;
		alloc->num_quar++;
	}
}

/** prealloc region blocks */
static void
prealloc_blocks(struct alloc_cache* alloc, size_t num)
{
	size_t i;
	struct regional* r;
	for(i=0; i<num; i++) {
		r = regional_create_custom(ALLOC_REG_SIZE);
		if(!r) {
			log_err("prealloc blocks: out of memory");
			return;
		}
		r->next = (char*)alloc->reg_list;
		alloc->reg_list = r;
		alloc->num_reg_blocks++;
	}
}

void
alloc_init(struct alloc_cache* alloc, struct alloc_cache* super,
	int thread_num)
{
	memset(alloc, 0, sizeof(*alloc));
	alloc->super = super;
	alloc->thread_num = thread_num;
	alloc->next_id = (uint64_t)thread_num; 	/* in steps, so that type */
	alloc->next_id <<= THRNUM_SHIFT; 	/* of *_id is used. */
	alloc->last_id = 1; 			/* so no 64bit constants, */
	alloc->last_id <<= THRNUM_SHIFT; 	/* or implicit 'int' ops. */
	alloc->last_id -= 1; 			/* for compiler portability. */
	alloc->last_id |= alloc->next_id;
	alloc->next_id += 1;			/* because id=0 is special. */
	alloc->max_reg_blocks = 100;
	alloc->num_reg_blocks = 0;
	alloc->reg_list = NULL;
	alloc->cleanup = NULL;
	alloc->cleanup_arg = NULL;
	if(alloc->super)
		prealloc_blocks(alloc, alloc->max_reg_blocks);
	if(!alloc->super) {
		lock_quick_init(&alloc->lock);
		lock_protect(&alloc->lock, alloc, sizeof(*alloc));
	}
}
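/*
 * A minimal sketch of how the two-level setup is typically used (the
 * caller-side variable names are illustrative, not part of this API): one
 * alloc_cache without a super acts as the shared top-level cache, and each
 * worker thread gets its own cache that points at it.
 *
 *	struct alloc_cache superalloc, worker_alloc;
 *	alloc_init(&superalloc, NULL, 0);	   // top level, gets a lock
 *	alloc_init(&worker_alloc, &superalloc, 1); // per-thread, lock-free path
 *	...
 *	alloc_clear(&worker_alloc);	// pushes cached entries to the super
 *	alloc_clear(&superalloc);	// frees everything that is left
 */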

void
alloc_clear(struct alloc_cache* alloc)
{
	alloc_special_type* p, *np;
	struct regional* r, *nr;
	if(!alloc)
		return;
	if(!alloc->super) {
		lock_quick_destroy(&alloc->lock);
	}
	if(alloc->super && alloc->quar) {
		/* push entire list into super */
		p = alloc->quar;
		while(alloc_special_next(p)) /* find last */
			p = alloc_special_next(p);
		lock_quick_lock(&alloc->super->lock);
		alloc_set_special_next(p, alloc->super->quar);
		alloc->super->quar = alloc->quar;
		alloc->super->num_quar += alloc->num_quar;
		lock_quick_unlock(&alloc->super->lock);
	} else {
		/* free */
		p = alloc->quar;
		while(p) {
			np = alloc_special_next(p);
			/* deinit special type */
			lock_rw_destroy(&p->entry.lock);
			free(p);
			p = np;
		}
	}
	alloc->quar = 0;
	alloc->num_quar = 0;
	r = alloc->reg_list;
	while(r) {
		nr = (struct regional*)r->next;
		free(r);
		r = nr;
	}
	alloc->reg_list = NULL;
	alloc->num_reg_blocks = 0;
}

uint64_t
alloc_get_id(struct alloc_cache* alloc)
{
	uint64_t id = alloc->next_id++;
	if(id == alloc->last_id) {
		log_warn("rrset alloc: out of 64bit ids. Clearing cache.");
		fptr_ok(fptr_whitelist_alloc_cleanup(alloc->cleanup));
		(*alloc->cleanup)(alloc->cleanup_arg);

		/* start back at first number */   	/* like in alloc_init */
		alloc->next_id = (uint64_t)alloc->thread_num;
		alloc->next_id <<= THRNUM_SHIFT; 	/* in steps for comp. */
		alloc->next_id += 1;			/* portability. */
		/* and generate new and safe id */
		id = alloc->next_id++;
	}
	return id;
}

alloc_special_type*
alloc_special_obtain(struct alloc_cache* alloc)
{
	alloc_special_type* p;
	log_assert(alloc);
	/* see if in local cache */
	if(alloc->quar) {
		p = alloc->quar;
		alloc->quar = alloc_special_next(p);
		alloc->num_quar--;
		p->id = alloc_get_id(alloc);
		return p;
	}
	/* see if in global cache */
	if(alloc->super) {
		/* could maybe grab alloc_max/2 entries in one go,
		 * but really, isn't that just as fast as this code? */
		lock_quick_lock(&alloc->super->lock);
		if((p = alloc->super->quar)) {
			alloc->super->quar = alloc_special_next(p);
			alloc->super->num_quar--;
		}
		lock_quick_unlock(&alloc->super->lock);
		if(p) {
			p->id = alloc_get_id(alloc);
			return p;
		}
	}
	/* allocate new */
	prealloc_setup(alloc);
	if(!(p = (alloc_special_type*)malloc(sizeof(alloc_special_type)))) {
		log_err("alloc_special_obtain: out of memory");
		return NULL;
	}
	alloc_setup_special(p);
	p->id = alloc_get_id(alloc);
	return p;
}

/** push mem and some more items to the super */
static void
pushintosuper(struct alloc_cache* alloc, alloc_special_type* mem)
{
	int i;
	alloc_special_type *p = alloc->quar;
	log_assert(p);
	log_assert(alloc && alloc->super &&
		alloc->num_quar >= ALLOC_SPECIAL_MAX);
	/* push ALLOC_SPECIAL_MAX/2 after mem */
	alloc_set_special_next(mem, alloc->quar);
	for(i=1; i<ALLOC_SPECIAL_MAX/2; i++) {
		p = alloc_special_next(p);
	}
	alloc->quar = alloc_special_next(p);
	alloc->num_quar -= ALLOC_SPECIAL_MAX/2;

	/* dump mem+list into the super quar list */
	lock_quick_lock(&alloc->super->lock);
	alloc_set_special_next(p, alloc->super->quar);
	alloc->super->quar = mem;
	alloc->super->num_quar += ALLOC_SPECIAL_MAX/2 + 1;
	lock_quick_unlock(&alloc->super->lock);
	/* so 1 lock per mem+alloc/2 deletes */
}

void
alloc_special_release(struct alloc_cache* alloc, alloc_special_type* mem)
{
	log_assert(alloc);
	if(!mem)
		return;
	if(!alloc->super) {
		lock_quick_lock(&alloc->lock); /* superalloc needs locking */
	}

	alloc_special_clean(mem);
	if(alloc->super && alloc->num_quar >= ALLOC_SPECIAL_MAX) {
		/* push it to the super structure */
		pushintosuper(alloc, mem);
		return;
	}

	alloc_set_special_next(mem, alloc->quar);
	alloc->quar = mem;
	alloc->num_quar++;
	if(!alloc->super) {
		lock_quick_unlock(&alloc->lock);
	}
}
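/*
 * Sketch of the obtain/release cycle as seen from a caller (assumed usage,
 * not a prescribed pattern; worker_alloc refers to the per-thread cache from
 * the alloc_init example above): the thread obtains a special entry, fills
 * it in, and later hands it back to the cache.
 *
 *	alloc_special_type* e = alloc_special_obtain(&worker_alloc);
 *	if(e) {
 *		// ... fill in the entry; e->id has already been assigned ...
 *		alloc_special_release(&worker_alloc, e); // back to the cache
 *	}
 */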

void
alloc_stats(struct alloc_cache* alloc)
{
	log_info("%salloc: %d in cache, %d blocks.", alloc->super?"":"sup",
		(int)alloc->num_quar, (int)alloc->num_reg_blocks);
}

size_t alloc_get_mem(struct alloc_cache* alloc)
{
	alloc_special_type* p;
	size_t s = sizeof(*alloc);
	if(!alloc->super) {
		lock_quick_lock(&alloc->lock); /* superalloc needs locking */
	}
	s += sizeof(alloc_special_type) * alloc->num_quar;
	for(p = alloc->quar; p; p = alloc_special_next(p)) {
		s += lock_get_mem(&p->entry.lock);
	}
	s += alloc->num_reg_blocks * ALLOC_REG_SIZE;
	if(!alloc->super) {
		lock_quick_unlock(&alloc->lock);
	}
	return s;
}

struct regional*
alloc_reg_obtain(struct alloc_cache* alloc)
{
	if(alloc->num_reg_blocks > 0) {
		struct regional* r = alloc->reg_list;
		alloc->reg_list = (struct regional*)r->next;
		r->next = NULL;
		alloc->num_reg_blocks--;
		return r;
	}
	return regional_create_custom(ALLOC_REG_SIZE);
}

void
alloc_reg_release(struct alloc_cache* alloc, struct regional* r)
{
	if(alloc->num_reg_blocks >= alloc->max_reg_blocks) {
		regional_destroy(r);
		return;
	}
	if(!r) return;
	regional_free_all(r);
	log_assert(r->next == NULL);
	r->next = (char*)alloc->reg_list;
	alloc->reg_list = r;
	alloc->num_reg_blocks++;
}
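/*
 * Sketch of how the cached regional blocks are meant to be used (assumed
 * usage): obtain a scratch region, make per-operation allocations from it,
 * then release the whole block in one call instead of freeing item by item.
 *
 *	struct regional* region = alloc_reg_obtain(&worker_alloc);
 *	if(region) {
 *		void* buf = regional_alloc(region, 512); // scratch memory
 *		(void)buf; // ... use it for the duration of one operation ...
 *		alloc_reg_release(&worker_alloc, region);
 *	}
 */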

void
alloc_set_id_cleanup(struct alloc_cache* alloc, void (*cleanup)(void*),
        void* arg)
{
	alloc->cleanup = cleanup;
	alloc->cleanup_arg = arg;
}
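/*
 * The cleanup hook is invoked from alloc_get_id() when a thread exhausts
 * its 2^48 id range.  A hypothetical caller (function and argument names
 * are illustrative only) would register it once at startup:
 *
 *	static void my_clear_rrset_cache(void* arg) {
 *		// flush cached rrsets so old ids can no longer collide
 *	}
 *	alloc_set_id_cleanup(&worker_alloc, &my_clear_rrset_cache, env);
 *
 * Note that the callback must also be accepted by the fptr_wlist whitelist
 * that alloc_get_id() checks before calling it.
 */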

/** global debug value to keep track of total memory mallocs */
size_t unbound_mem_alloc = 0;
/** global debug value to keep track of total memory frees */
size_t unbound_mem_freed = 0;
#ifdef UNBOUND_ALLOC_STATS
/** special value to know if the memory is being tracked */
uint64_t mem_special = (uint64_t)0xfeed43327766abcdLL;
#ifdef malloc
#undef malloc
#endif
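/*
 * Layout used by the stats wrappers below: every tracked allocation gets a
 * 16-byte header in front of the memory returned to the caller,
 *
 *	[ size_t size (8 bytes) | mem_special marker (8 bytes) | user data ]
 *
 * The marker lets unbound_stat_free() and unbound_stat_realloc() tell
 * tracked pointers apart from memory allocated without the wrappers, which
 * is passed straight through to free()/realloc().
 */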
/** malloc with stats */
void *unbound_stat_malloc(size_t size)
{
	void* res;
	if(size == 0) size = 1;
	res = malloc(size+16);
	if(!res) return NULL;
	unbound_mem_alloc += size;
	log_info("stat %p=malloc(%u)", res+16, (unsigned)size);
	memcpy(res, &size, sizeof(size));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
#ifdef calloc
#undef calloc
#endif
#ifndef INT_MAX
#define INT_MAX (((int)-1)>>1)
#endif
/** calloc with stats */
void *unbound_stat_calloc(size_t nmemb, size_t size)
{
	size_t s;
	void* res;
	if(nmemb != 0 && INT_MAX/nmemb < size)
		return NULL; /* integer overflow check */
	s = (nmemb*size==0)?(size_t)1:nmemb*size;
	res = calloc(1, s+16);
	if(!res) return NULL;
	log_info("stat %p=calloc(%u, %u)", res+16, (unsigned)nmemb, (unsigned)size);
	unbound_mem_alloc += s;
	memcpy(res, &s, sizeof(s));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
#ifdef free
#undef free
#endif
/** free with stats */
void unbound_stat_free(void *ptr)
{
	size_t s;
	if(!ptr) return;
	if(memcmp(ptr-8, &mem_special, sizeof(mem_special)) != 0) {
		free(ptr);
		return;
	}
	ptr-=16;
	memcpy(&s, ptr, sizeof(s));
	log_info("stat free(%p) size %u", ptr+16, (unsigned)s);
	memset(ptr+8, 0, 8);
	unbound_mem_freed += s;
	free(ptr);
}
#ifdef realloc
#undef realloc
#endif
/** realloc with stats */
void *unbound_stat_realloc(void *ptr, size_t size)
{
	size_t cursz;
	void* res;
	if(!ptr) return unbound_stat_malloc(size);
	if(memcmp(ptr-8, &mem_special, sizeof(mem_special)) != 0) {
		return realloc(ptr, size);
	}
	if(size==0) {
		unbound_stat_free(ptr);
		return NULL;
	}
	ptr -= 16;
	memcpy(&cursz, ptr, sizeof(cursz));
	if(cursz == size) {
		/* nothing changes; hand back the user pointer, which sits
		 * 16 bytes past the header that ptr now points at */
		return ptr+16;
	}
	res = malloc(size+16);
	if(!res) return NULL;
	unbound_mem_alloc += size;
	unbound_mem_freed += cursz;
	log_info("stat realloc(%p, %u) from %u", ptr+16, (unsigned)size, (unsigned)cursz);
	if(cursz > size) {
		memcpy(res+16, ptr+16, size);
	} else if(size > cursz) {
		memcpy(res+16, ptr+16, cursz);
	}
	memset(ptr+8, 0, 8);
	free(ptr);
	memcpy(res, &size, sizeof(size));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}

/** log to file where alloc was done */
void *unbound_stat_malloc_log(size_t size, const char* file, int line,
        const char* func)
{
	log_info("%s:%d %s malloc(%u)", file, line, func, (unsigned)size);
	return unbound_stat_malloc(size);
}

/** log to file where alloc was done */
void *unbound_stat_calloc_log(size_t nmemb, size_t size, const char* file,
        int line, const char* func)
{
	log_info("%s:%d %s calloc(%u, %u)", file, line, func,
		(unsigned) nmemb, (unsigned)size);
	return unbound_stat_calloc(nmemb, size);
}

/** log to file where free was done */
void unbound_stat_free_log(void *ptr, const char* file, int line,
        const char* func)
{
	if(ptr && memcmp(ptr-8, &mem_special, sizeof(mem_special)) == 0) {
		size_t s;
		memcpy(&s, ptr-16, sizeof(s));
		log_info("%s:%d %s free(%p) size %u",
			file, line, func, ptr, (unsigned)s);
	} else
		log_info("%s:%d %s unmatched free(%p)", file, line, func, ptr);
	unbound_stat_free(ptr);
}

/** log to file where alloc was done */
void *unbound_stat_realloc_log(void *ptr, size_t size, const char* file,
        int line, const char* func)
{
	log_info("%s:%d %s realloc(%p, %u)", file, line, func,
		ptr, (unsigned)size);
	return unbound_stat_realloc(ptr, size);
}

#endif /* UNBOUND_ALLOC_STATS */
#ifdef UNBOUND_ALLOC_LITE
#undef malloc
#undef calloc
#undef free
#undef realloc
/** length of prefix and suffix */
static size_t lite_pad = 16;
/** prefix value to check */
static char* lite_pre = "checkfront123456";
/** suffix value to check */
static char* lite_post= "checkafter123456";

void *unbound_stat_malloc_lite(size_t size, const char* file, int line,
        const char* func)
{
	/*  [prefix .. len .. actual data .. suffix] */
	void* res = malloc(size+lite_pad*2+sizeof(size_t));
	if(!res) return NULL;
	memmove(res, lite_pre, lite_pad);
	memmove(res+lite_pad, &size, sizeof(size_t));
	memset(res+lite_pad+sizeof(size_t), 0x1a, size); /* init the memory */
	memmove(res+lite_pad+size+sizeof(size_t), lite_post, lite_pad);
	return res+lite_pad+sizeof(size_t);
}
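/*
 * Guard layout used by the lite wrappers (as set up above): for a request
 * of n bytes the real allocation is n + 2*lite_pad + sizeof(size_t) bytes,
 *
 *	[ "checkfront123456" | size_t n | n data bytes | "checkafter123456" ]
 *
 * and the caller receives a pointer just past the stored length.  The
 * free/realloc checks below compare both pads and abort with fatal_exit()
 * if either one has been overwritten.
 */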

void *unbound_stat_calloc_lite(size_t nmemb, size_t size, const char* file,
        int line, const char* func)
{
	size_t req;
	void* res;
	if(nmemb != 0 && INT_MAX/nmemb < size)
		return NULL; /* integer overflow check */
	req = nmemb * size;
	res = malloc(req+lite_pad*2+sizeof(size_t));
	if(!res) return NULL;
	memmove(res, lite_pre, lite_pad);
	memmove(res+lite_pad, &req, sizeof(size_t));
	memset(res+lite_pad+sizeof(size_t), 0, req);
	memmove(res+lite_pad+req+sizeof(size_t), lite_post, lite_pad);
	return res+lite_pad+sizeof(size_t);
}

void unbound_stat_free_lite(void *ptr, const char* file, int line,
        const char* func)
{
	void* real;
	size_t orig = 0;
	if(!ptr) return;
	real = ptr-lite_pad-sizeof(size_t);
	if(memcmp(real, lite_pre, lite_pad) != 0) {
		log_err("free(): prefix failed %s:%d %s", file, line, func);
		log_hex("prefix here", real, lite_pad);
		log_hex("  should be", lite_pre, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memmove(&orig, real+lite_pad, sizeof(size_t));
	if(memcmp(real+lite_pad+orig+sizeof(size_t), lite_post, lite_pad)!=0){
		log_err("free(): suffix failed %s:%d %s", file, line, func);
		log_err("alloc size is %d", (int)orig);
		log_hex("suffix here", real+lite_pad+orig+sizeof(size_t),
			lite_pad);
		log_hex("  should be", lite_post, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memset(real, 0xdd, orig+lite_pad*2+sizeof(size_t)); /* mark it */
	free(real);
}

void *unbound_stat_realloc_lite(void *ptr, size_t size, const char* file,
        int line, const char* func)
{
	/* always free and realloc (no growing) */
	void* real, *newa;
	size_t orig = 0;
	if(!ptr) {
		/* like malloc() */
		return unbound_stat_malloc_lite(size, file, line, func);
	}
	if(!size) {
		/* like free() */
		unbound_stat_free_lite(ptr, file, line, func);
		return NULL;
	}
	/* change allocation size and copy */
	real = ptr-lite_pad-sizeof(size_t);
	if(memcmp(real, lite_pre, lite_pad) != 0) {
		log_err("realloc(): prefix failed %s:%d %s", file, line, func);
		log_hex("prefix here", real, lite_pad);
		log_hex("  should be", lite_pre, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memmove(&orig, real+lite_pad, sizeof(size_t));
	if(memcmp(real+lite_pad+orig+sizeof(size_t), lite_post, lite_pad)!=0){
		log_err("realloc(): suffix failed %s:%d %s", file, line, func);
		log_err("alloc size is %d", (int)orig);
		log_hex("suffix here", real+lite_pad+orig+sizeof(size_t),
			lite_pad);
		log_hex("  should be", lite_post, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	/* new alloc and copy over */
	newa = unbound_stat_malloc_lite(size, file, line, func);
	if(!newa)
		return NULL;
	if(orig < size)
		memmove(newa, ptr, orig);
	else	memmove(newa, ptr, size);
	memset(real, 0xdd, orig+lite_pad*2+sizeof(size_t)); /* mark it */
	free(real);
	return newa;
}

char* unbound_strdup_lite(const char* s, const char* file, int line,
        const char* func)
{
	/* this routine is made to make sure strdup() uses the malloc_lite */
	size_t l = strlen(s)+1;
	char* n = (char*)unbound_stat_malloc_lite(l, file, line, func);
	if(!n) return NULL;
	memmove(n, s, l);
	return n;
}

char* unbound_lite_wrapstr(char* s)
{
	char* n = unbound_strdup_lite(s, __FILE__, __LINE__, __func__);
	free(s);
	return n;
}

#undef sldns_pkt2wire
sldns_status unbound_lite_pkt2wire(uint8_t **dest, const sldns_pkt *p,
	size_t *size)
{
	uint8_t* md = NULL;
	size_t ms = 0;
	sldns_status s = sldns_pkt2wire(&md, p, &ms);
	if(md) {
		*dest = unbound_stat_malloc_lite(ms, __FILE__, __LINE__,
			__func__);
		*size = ms;
		if(!*dest) { free(md); return LDNS_STATUS_MEM_ERR; }
		memcpy(*dest, md, ms);
		free(md);
	} else {
		*dest = NULL;
		*size = 0;
	}
	return s;
}

#undef i2d_DSA_SIG
int unbound_lite_i2d_DSA_SIG(DSA_SIG* dsasig, unsigned char** sig)
{
	unsigned char* n = NULL;
	int r= i2d_DSA_SIG(dsasig, &n);
	if(n) {
		*sig = unbound_stat_malloc_lite((size_t)r, __FILE__, __LINE__,
			__func__);
		if(!*sig) return -1;
		memcpy(*sig, n, (size_t)r);
		free(n);
		return r;
	}
	*sig = NULL;
	return r;
}

#endif /* UNBOUND_ALLOC_LITE */