/* kern_malloc.c revision 8876 */
1/* 2 * Copyright (c) 1987, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 * $Id: kern_malloc.c,v 1.11 1995/04/16 11:25:15 davidg Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

/* Power-of-two freelist buckets, indexed by BUCKETINDX(size). */
struct kmembuckets bucket[MINBUCKET + 16];
/* Per-type allocation statistics; valid indices are 0..M_LAST-1. */
struct kmemstats kmemstats[M_LAST];
/* One usage record per page cluster of the kmem submap (see btokup()). */
struct kmemusage *kmemusage;
/* Bounds of the submap handed out by kmem_suballoc() in kmeminit(). */
char *kmembase, *kmemlimit;
/* Printable name for each malloc type, indexed by type. */
char *memname[] = INITKMEMNAMES;

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	64

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
75 */ 76struct freelist { 77 long spare0; 78 short type; 79 long spare1; 80 caddr_t next; 81}; 82#else /* !DIAGNOSTIC */ 83struct freelist { 84 caddr_t next; 85}; 86#endif /* DIAGNOSTIC */ 87 88/* 89 * Allocate a block of memory 90 */ 91void * 92malloc(size, type, flags) 93 unsigned long size; 94 int type, flags; 95{ 96 register struct kmembuckets *kbp; 97 register struct kmemusage *kup; 98 register struct freelist *freep; 99 long indx, npg, allocsize; 100 int s; 101 caddr_t va, cp, savedlist; 102#ifdef DIAGNOSTIC 103 long *end, *lp; 104 int copysize; 105 char *savedtype; 106#endif 107#ifdef KMEMSTATS 108 register struct kmemstats *ksp = &kmemstats[type]; 109 110 if (((unsigned long)type) > M_LAST) 111 panic("malloc - bogus type"); 112#endif 113 indx = BUCKETINDX(size); 114 kbp = &bucket[indx]; 115 s = splhigh(); 116#ifdef KMEMSTATS 117 while (ksp->ks_memuse >= ksp->ks_limit) { 118 if (flags & M_NOWAIT) { 119 splx(s); 120 return ((void *) NULL); 121 } 122 if (ksp->ks_limblocks < 65535) 123 ksp->ks_limblocks++; 124 tsleep((caddr_t)ksp, PSWP+2, memname[type], 0); 125 } 126 ksp->ks_size |= 1 << indx; 127#endif 128#ifdef DIAGNOSTIC 129 copysize = 1 << indx < MAX_COPY ? 
1 << indx : MAX_COPY; 130#endif 131 if (kbp->kb_next == NULL) { 132 kbp->kb_last = NULL; 133 if (size > MAXALLOCSAVE) 134 allocsize = roundup(size, CLBYTES); 135 else 136 allocsize = 1 << indx; 137 npg = clrnd(btoc(allocsize)); 138 va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg), flags); 139 if (va == NULL) { 140 splx(s); 141 return ((void *) NULL); 142 } 143#ifdef KMEMSTATS 144 kbp->kb_total += kbp->kb_elmpercl; 145#endif 146 kup = btokup(va); 147 kup->ku_indx = indx; 148 if (allocsize > MAXALLOCSAVE) { 149 if (npg > 65535) 150 panic("malloc: allocation too large"); 151 kup->ku_pagecnt = npg; 152#ifdef KMEMSTATS 153 ksp->ks_memuse += allocsize; 154#endif 155 goto out; 156 } 157#ifdef KMEMSTATS 158 kup->ku_freecnt = kbp->kb_elmpercl; 159 kbp->kb_totalfree += kbp->kb_elmpercl; 160#endif 161 /* 162 * Just in case we blocked while allocating memory, 163 * and someone else also allocated memory for this 164 * bucket, don't assume the list is still empty. 165 */ 166 savedlist = kbp->kb_next; 167 kbp->kb_next = cp = va + (npg * NBPG) - allocsize; 168 for (;;) { 169 freep = (struct freelist *)cp; 170#ifdef DIAGNOSTIC 171 /* 172 * Copy in known text to detect modification 173 * after freeing. 174 */ 175 end = (long *)&cp[copysize]; 176 for (lp = (long *)cp; lp < end; lp++) 177 *lp = WEIRD_ADDR; 178 freep->type = M_FREE; 179#endif /* DIAGNOSTIC */ 180 if (cp <= va) 181 break; 182 cp -= allocsize; 183 freep->next = cp; 184 } 185 freep->next = savedlist; 186 if (kbp->kb_last == NULL) 187 kbp->kb_last = (caddr_t)freep; 188 } 189 va = kbp->kb_next; 190 kbp->kb_next = ((struct freelist *)va)->next; 191#ifdef DIAGNOSTIC 192 freep = (struct freelist *)va; 193 savedtype = (unsigned)freep->type < M_LAST ? 
194 memname[freep->type] : "???"; 195 if (kbp->kb_next && 196 !kernacc(kbp->kb_next, sizeof(struct freelist), 0)) { 197 printf("%s of object %p size %ld %s %s (invalid addr %p)\n", 198 "Data modified on freelist: word 2.5", va, size, 199 "previous type", savedtype, kbp->kb_next); 200 kbp->kb_next = NULL; 201 } 202#if BYTE_ORDER == BIG_ENDIAN 203 freep->type = WEIRD_ADDR >> 16; 204#endif 205#if BYTE_ORDER == LITTLE_ENDIAN 206 freep->type = (short)WEIRD_ADDR; 207#endif 208 if (((long)(&freep->next)) & 0x2) 209 freep->next = (caddr_t)((WEIRD_ADDR >> 16)|(WEIRD_ADDR << 16)); 210 else 211 freep->next = (caddr_t)WEIRD_ADDR; 212 end = (long *)&va[copysize]; 213 for (lp = (long *)va; lp < end; lp++) { 214 if (*lp == WEIRD_ADDR) 215 continue; 216 printf("%s %d of object %p size %ld %s %s (0x%lx != 0x%x)\n", 217 "Data modified on freelist: word", lp - (long *)va, 218 va, size, "previous type", savedtype, *lp, WEIRD_ADDR); 219 break; 220 } 221 freep->spare0 = 0; 222#endif /* DIAGNOSTIC */ 223#ifdef KMEMSTATS 224 kup = btokup(va); 225 if (kup->ku_indx != indx) 226 panic("malloc: wrong bucket"); 227 if (kup->ku_freecnt == 0) 228 panic("malloc: lost data"); 229 kup->ku_freecnt--; 230 kbp->kb_totalfree--; 231 ksp->ks_memuse += 1 << indx; 232out: 233 kbp->kb_calls++; 234 ksp->ks_inuse++; 235 ksp->ks_calls++; 236 if (ksp->ks_memuse > ksp->ks_maxused) 237 ksp->ks_maxused = ksp->ks_memuse; 238#else 239out: 240#endif 241 splx(s); 242 return ((void *) va); 243} 244 245/* 246 * Free a block of memory allocated by malloc. 
247 */ 248void 249free(addr, type) 250 void *addr; 251 int type; 252{ 253 register struct kmembuckets *kbp; 254 register struct kmemusage *kup; 255 register struct freelist *freep; 256 long size; 257 int s; 258#ifdef DIAGNOSTIC 259 caddr_t cp; 260 long *end, *lp, alloc, copysize; 261#endif 262#ifdef KMEMSTATS 263 register struct kmemstats *ksp = &kmemstats[type]; 264#endif 265 266#ifdef DIAGNOSTIC 267 if ((char *)addr < kmembase || (char *)addr >= kmemlimit) { 268 panic("free: address 0x%x out of range", addr); 269 } 270 if ((u_long)type > M_LAST) { 271 panic("free: type %d out of range", type); 272 } 273#endif 274 kup = btokup(addr); 275 size = 1 << kup->ku_indx; 276 kbp = &bucket[kup->ku_indx]; 277 s = splhigh(); 278#ifdef DIAGNOSTIC 279 /* 280 * Check for returns of data that do not point to the 281 * beginning of the allocation. 282 */ 283 if (size > NBPG * CLSIZE) 284 alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)]; 285 else 286 alloc = addrmask[kup->ku_indx]; 287 if (((u_long)addr & alloc) != 0) 288 panic("free: unaligned addr 0x%x, size %d, type %s, mask %d", 289 addr, size, memname[type], alloc); 290#endif /* DIAGNOSTIC */ 291 if (size > MAXALLOCSAVE) { 292 kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt)); 293#ifdef KMEMSTATS 294 size = kup->ku_pagecnt << PGSHIFT; 295 ksp->ks_memuse -= size; 296 kup->ku_indx = 0; 297 kup->ku_pagecnt = 0; 298 if (ksp->ks_memuse + size >= ksp->ks_limit && 299 ksp->ks_memuse < ksp->ks_limit) 300 wakeup((caddr_t)ksp); 301 ksp->ks_inuse--; 302 kbp->kb_total -= 1; 303#endif 304 splx(s); 305 return; 306 } 307 freep = (struct freelist *)addr; 308#ifdef DIAGNOSTIC 309 /* 310 * Check for multiple frees. Use a quick check to see if 311 * it looks free before laboriously searching the freelist. 
312 */ 313 if (freep->spare0 == WEIRD_ADDR) { 314 for (cp = kbp->kb_next; cp; cp = *(caddr_t *)cp) { 315 if (addr != cp) 316 continue; 317 printf("multiply freed item %p\n", addr); 318 panic("free: duplicated free"); 319 } 320 } 321 /* 322 * Copy in known text to detect modification after freeing 323 * and to make it look free. Also, save the type being freed 324 * so we can list likely culprit if modification is detected 325 * when the object is reallocated. 326 */ 327 copysize = size < MAX_COPY ? size : MAX_COPY; 328 end = (long *)&((caddr_t)addr)[copysize]; 329 for (lp = (long *)addr; lp < end; lp++) 330 *lp = WEIRD_ADDR; 331 freep->type = type; 332#endif /* DIAGNOSTIC */ 333#ifdef KMEMSTATS 334 kup->ku_freecnt++; 335 if (kup->ku_freecnt >= kbp->kb_elmpercl) 336 if (kup->ku_freecnt > kbp->kb_elmpercl) 337 panic("free: multiple frees"); 338 else if (kbp->kb_totalfree > kbp->kb_highwat) 339 kbp->kb_couldfree++; 340 kbp->kb_totalfree++; 341 ksp->ks_memuse -= size; 342 if (ksp->ks_memuse + size >= ksp->ks_limit && 343 ksp->ks_memuse < ksp->ks_limit) 344 wakeup((caddr_t)ksp); 345 ksp->ks_inuse--; 346#endif 347 if (kbp->kb_next == NULL) 348 kbp->kb_next = addr; 349 else 350 ((struct freelist *)kbp->kb_last)->next = addr; 351 freep->next = NULL; 352 kbp->kb_last = addr; 353 splx(s); 354} 355 356/* 357 * Initialize the kernel memory allocator 358 */ 359void 360kmeminit() 361{ 362 register long indx; 363 int npg; 364 365#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0) 366 ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2 367#endif 368#if (MAXALLOCSAVE > MINALLOCSIZE * 32768) 369 ERROR!_kmeminit:_MAXALLOCSAVE_too_big 370#endif 371#if (MAXALLOCSAVE < CLBYTES) 372 ERROR!_kmeminit:_MAXALLOCSAVE_too_small 373#endif 374 npg = VM_KMEM_SIZE/ NBPG; 375 if( npg > cnt.v_page_count) 376 npg = cnt.v_page_count; 377 378 kmemusage = (struct kmemusage *) kmem_alloc(kernel_map, 379 (vm_size_t)(npg * sizeof(struct kmemusage))); 380 kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase, 
381 (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE); 382#ifdef KMEMSTATS 383 for (indx = 0; indx < MINBUCKET + 16; indx++) { 384 if (1 << indx >= CLBYTES) 385 bucket[indx].kb_elmpercl = 1; 386 else 387 bucket[indx].kb_elmpercl = CLBYTES / (1 << indx); 388 bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl; 389 } 390 for (indx = 0; indx < M_LAST; indx++) 391 kmemstats[indx].ks_limit = npg * NBPG * 6 / 10; 392#endif 393} 394