34c34
< * $FreeBSD: head/sys/kern/kern_malloc.c 92194 2002-03-13 01:42:33Z archie $
---
> * $FreeBSD: head/sys/kern/kern_malloc.c 92654 2002-03-19 09:11:49Z jeff $
54a55,56
> #include <vm/uma.h>
> #include <vm/uma_int.h>
83,84d84
< static struct kmembuckets bucket[MINBUCKET + 16];
< static struct kmemusage *kmemusage;
88c88,90
< static struct mtx malloc_mtx;
---
> #define KMEM_ZSHIFT 4
> #define KMEM_ZBASE 16
> #define KMEM_ZMASK (KMEM_ZBASE - 1)
90c92,94
< u_int vm_kmem_size;
---
> #define KMEM_ZMAX 65536
> #define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT)
> static uma_zone_t kmemzones[KMEM_ZSIZE + 1];
92,100c96,115
< #ifdef INVARIANTS
< /*
< * This structure provides a set of masks to catch unaligned frees.
< */
< static long addrmask[] = { 0,
< 0x00000001, 0x00000003, 0x00000007, 0x0000000f,
< 0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
< 0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
< 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
---
>
> /* These won't be powers of two for long */
> struct {
> int size;
> char *name;
> } kmemsizes[] = {
> {16, "16"},
> {32, "32"},
> {64, "64"},
> {128, "128"},
> {256, "256"},
> {512, "512"},
> {1024, "1024"},
> {2048, "2048"},
> {4096, "4096"},
> {8192, "8192"},
> {16384, "16384"},
> {32768, "32768"},
> {65536, "65536"},
> {0, NULL},
103,108c118
< /*
< * The WEIRD_ADDR is used as known text to copy into free objects so
< * that modifications after frees can be detected.
< */
< #define WEIRD_ADDR 0xdeadc0de
< #define MAX_COPY 64
---
> static struct mtx malloc_mtx;
110,126c120
< /*
< * Normally the first word of the structure is used to hold the list
< * pointer for free objects. However, when running with diagnostics,
< * we use the third and fourth fields, so as to catch modifications
< * in the most commonly trashed first two words.
< */
< struct freelist {
< long spare0;
< struct malloc_type *type;
< long spare1;
< caddr_t next;
< };
< #else /* !INVARIANTS */
< struct freelist {
< caddr_t next;
< };
< #endif /* INVARIANTS */
---
> u_int vm_kmem_size;
142,145d135
< register struct kmembuckets *kbp;
< register struct kmemusage *kup;
< register struct freelist *freep;
< long indx, npg, allocsize;
147,152c137,139
< caddr_t va, cp, savedlist;
< #ifdef INVARIANTS
< long *end, *lp;
< int copysize;
< const char *savedtype;
< #endif
---
> long indx;
> caddr_t va;
> uma_zone_t zone;
160,161d146
< indx = BUCKETINDX(size);
< kbp = &bucket[indx];
163c148
< mtx_lock(&malloc_mtx);
---
> /* mtx_lock(&malloc_mtx); XXX */
167c152
< mtx_unlock(&malloc_mtx);
---
> /* mtx_unlock(&malloc_mtx); XXX */
172c157
< msleep((caddr_t)ksp, &malloc_mtx, PSWP+2, type->ks_shortdesc,
---
> msleep((caddr_t)ksp, /* &malloc_mtx */ NULL, PSWP+2, type->ks_shortdesc,
175,185c160
< ksp->ks_size |= 1 << indx;
< #ifdef INVARIANTS
< copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
< #endif
< if (kbp->kb_next == NULL) {
< kbp->kb_last = NULL;
< if (size > MAXALLOCSAVE)
< allocsize = roundup(size, PAGE_SIZE);
< else
< allocsize = 1 << indx;
< npg = btoc(allocsize);
---
> /* mtx_unlock(&malloc_mtx); XXX */
187,189c162,168
< mtx_unlock(&malloc_mtx);
< va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg), flags);
<
---
> if (size <= KMEM_ZMAX) {
> indx = size;
> if (indx & KMEM_ZMASK)
> indx = (indx & ~KMEM_ZMASK) + KMEM_ZBASE;
> zone = kmemzones[indx >> KMEM_ZSHIFT];
> indx = zone->uz_size;
> va = uma_zalloc(zone, flags);
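Note on the small-allocation path above: instead of the old power-of-two bucket index, the request size is rounded up to the next multiple of KMEM_ZBASE (16 bytes), and the result, shifted right by KMEM_ZSHIFT, selects the backing UMA zone from kmemzones[]. A minimal userland sketch of the same arithmetic follows; zone_size[] is a stand-in for kmemzones[] (holding only each slot's item size), not the kernel structure.

    #include <stdio.h>

    #define KMEM_ZSHIFT 4
    #define KMEM_ZBASE  16
    #define KMEM_ZMASK  (KMEM_ZBASE - 1)

    /* Stand-in for kmemzones[]: the item size of the zone each slot maps to. */
    static const int zone_size[] = { 16, 16, 32, 64, 64, 128, 128, 128, 128 };

    static int
    size_to_zone(int size)
    {
            int indx = size;

            /* Round up to the next KMEM_ZBASE boundary, as malloc() now does. */
            if (indx & KMEM_ZMASK)
                    indx = (indx & ~KMEM_ZMASK) + KMEM_ZBASE;
            return (zone_size[indx >> KMEM_ZSHIFT]);
    }

    int
    main(void)
    {
            /* A 100-byte request rounds to 112 and lands in the 128-byte zone. */
            printf("%d\n", size_to_zone(100));
            return (0);
    }
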
191,192c170,171
< splx(s);
< return ((void *) NULL);
---
> /* mtx_lock(&malloc_mtx); XXX */
> goto out;
194,207c173,180
< /*
< * Enter malloc_mtx after the error check to avoid having to
< * immediately exit it again if there is an error.
< */
< mtx_lock(&malloc_mtx);
<
< kbp->kb_total += kbp->kb_elmpercl;
< kup = btokup(va);
< kup->ku_indx = indx;
< if (allocsize > MAXALLOCSAVE) {
< if (npg > 65535)
< panic("malloc: allocation too large");
< kup->ku_pagecnt = npg;
< ksp->ks_memuse += allocsize;
---
> ksp->ks_size |= indx;
> } else {
> /* XXX This is not the next power of two so this will break ks_size */
> indx = roundup(size, PAGE_SIZE);
> zone = NULL;
> va = uma_large_malloc(size, flags);
> if (va == NULL) {
> /* mtx_lock(&malloc_mtx); XXX */
210,238d182
< kup->ku_freecnt = kbp->kb_elmpercl;
< kbp->kb_totalfree += kbp->kb_elmpercl;
< /*
< * Just in case we blocked while allocating memory,
< * and someone else also allocated memory for this
< * bucket, don't assume the list is still empty.
< */
< savedlist = kbp->kb_next;
< kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
< for (;;) {
< freep = (struct freelist *)cp;
< #ifdef INVARIANTS
< /*
< * Copy in known text to detect modification
< * after freeing.
< */
< end = (long *)&cp[copysize];
< for (lp = (long *)cp; lp < end; lp++)
< *lp = WEIRD_ADDR;
< freep->type = M_FREE;
< #endif /* INVARIANTS */
< if (cp <= va)
< break;
< cp -= allocsize;
< freep->next = cp;
< }
< freep->next = savedlist;
< if (kbp->kb_last == NULL)
< kbp->kb_last = (caddr_t)freep;
240,271c184,185
< va = kbp->kb_next;
< kbp->kb_next = ((struct freelist *)va)->next;
< #ifdef INVARIANTS
< freep = (struct freelist *)va;
< savedtype = (const char *) freep->type->ks_shortdesc;
< freep->type = (struct malloc_type *)WEIRD_ADDR;
< if ((intptr_t)(void *)&freep->next & 0x2)
< freep->next = (caddr_t)((WEIRD_ADDR >> 16)|(WEIRD_ADDR << 16));
< else
< freep->next = (caddr_t)WEIRD_ADDR;
< end = (long *)&va[copysize];
< for (lp = (long *)va; lp < end; lp++) {
< if (*lp == WEIRD_ADDR)
< continue;
< printf("%s %ld of object %p size %lu %s %s (0x%lx != 0x%lx)\n",
< "Data modified on freelist: word",
< (long)(lp - (long *)va), (void *)va, size,
< "previous type", savedtype, *lp, (u_long)WEIRD_ADDR);
< break;
< }
< freep->spare0 = 0;
< #endif /* INVARIANTS */
< kup = btokup(va);
< if (kup->ku_indx != indx)
< panic("malloc: wrong bucket");
< if (kup->ku_freecnt == 0)
< panic("malloc: lost data");
< kup->ku_freecnt--;
< kbp->kb_totalfree--;
< ksp->ks_memuse += 1 << indx;
< out:
< kbp->kb_calls++;
---
> /* mtx_lock(&malloc_mtx); XXX */
> ksp->ks_memuse += indx;
272a187
> out:
277c192
< mtx_unlock(&malloc_mtx);
---
> /* mtx_unlock(&malloc_mtx); XXX */
296,299c211,213
< register struct kmembuckets *kbp;
< register struct kmemusage *kup;
< register struct freelist *freep;
< long size;
---
> uma_slab_t slab;
> void *mem;
> u_long size;
301,304d214
< #ifdef INVARIANTS
< struct freelist *fp;
< long *end, *lp, alloc, copysize;
< #endif
311,315c221
< KASSERT(kmembase <= (char *)addr && (char *)addr < kmemlimit,
< ("free: address %p out of range", (void *)addr));
< kup = btokup(addr);
< size = 1 << kup->ku_indx;
< kbp = &bucket[kup->ku_indx];
---
> size = 0;
317,334d222
< mtx_lock(&malloc_mtx);
< #ifdef INVARIANTS
< /*
< * Check for returns of data that do not point to the
< * beginning of the allocation.
< */
< if (size > PAGE_SIZE)
< alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
< else
< alloc = addrmask[kup->ku_indx];
< if (((uintptr_t)(void *)addr & alloc) != 0)
< panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
< (void *)addr, size, type->ks_shortdesc, alloc);
< #endif /* INVARIANTS */
< if (size > MAXALLOCSAVE) {
< mtx_unlock(&malloc_mtx);
< kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
< mtx_lock(&malloc_mtx);
336,347c224,235
< size = kup->ku_pagecnt << PAGE_SHIFT;
< ksp->ks_memuse -= size;
< kup->ku_indx = 0;
< kup->ku_pagecnt = 0;
< if (ksp->ks_memuse + size >= ksp->ks_limit &&
< ksp->ks_memuse < ksp->ks_limit)
< wakeup((caddr_t)ksp);
< ksp->ks_inuse--;
< kbp->kb_total -= 1;
< splx(s);
< mtx_unlock(&malloc_mtx);
< return;
---
> mem = (void *)((u_long)addr & (~UMA_SLAB_MASK));
> slab = hash_sfind(mallochash, mem);
>
> if (slab == NULL)
> panic("free: address %p(%p) has not been allocated.\n", addr, mem);
>
> if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
> size = slab->us_zone->uz_size;
> uma_zfree_arg(slab->us_zone, addr, slab);
> } else {
> size = slab->us_size;
> uma_large_free(slab);
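Note on the new free() path above: the pointer is masked down to the start of its slab and that base address is the key used to look the slab header up in mallochash. Items from the fixed-size zones go back through uma_zfree_arg() with their zone; slabs flagged UMA_SLAB_MALLOC came from uma_large_malloc() and are released with uma_large_free(). A small sketch of just the masking step, assuming a 4 KB slab (UMA_SLAB_SIZE equal to PAGE_SIZE):

    #include <stdio.h>
    #include <stdint.h>

    #define SLAB_SIZE   4096UL              /* assumed UMA_SLAB_SIZE == PAGE_SIZE */
    #define SLAB_MASK   (SLAB_SIZE - 1)

    int
    main(void)
    {
            uintptr_t addr = 0xc1234567UL;  /* hypothetical kernel pointer */

            /* Same masking free() uses to derive the hash key for the slab. */
            printf("slab base: %#lx\n", (unsigned long)(addr & ~SLAB_MASK));
            return (0);
    }
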
349,384c237,238
< freep = (struct freelist *)addr;
< #ifdef INVARIANTS
< /*
< * Check for multiple frees. Use a quick check to see if
< * it looks free before laboriously searching the freelist.
< */
< if (freep->spare0 == WEIRD_ADDR) {
< fp = (struct freelist *)kbp->kb_next;
< while (fp) {
< if (fp->spare0 != WEIRD_ADDR)
< panic("free: free item %p modified", fp);
< else if (addr == (caddr_t)fp)
< panic("free: multiple freed item %p", addr);
< fp = (struct freelist *)fp->next;
< }
< }
< /*
< * Copy in known text to detect modification after freeing
< * and to make it look free. Also, save the type being freed
< * so we can list likely culprit if modification is detected
< * when the object is reallocated.
< */
< copysize = size < MAX_COPY ? size : MAX_COPY;
< end = (long *)&((caddr_t)addr)[copysize];
< for (lp = (long *)addr; lp < end; lp++)
< *lp = WEIRD_ADDR;
< freep->type = type;
< #endif /* INVARIANTS */
< kup->ku_freecnt++;
< if (kup->ku_freecnt >= kbp->kb_elmpercl) {
< if (kup->ku_freecnt > kbp->kb_elmpercl)
< panic("free: multiple frees");
< else if (kbp->kb_totalfree > kbp->kb_highwat)
< kbp->kb_couldfree++;
< }
< kbp->kb_totalfree++;
---
> /* mtx_lock(&malloc_mtx); XXX */
>
390,411d243
< #ifdef OLD_MALLOC_MEMORY_POLICY
< if (kbp->kb_next == NULL)
< kbp->kb_next = addr;
< else
< ((struct freelist *)kbp->kb_last)->next = addr;
< freep->next = NULL;
< kbp->kb_last = addr;
< #else
< /*
< * Return memory to the head of the queue for quick reuse. This
< * can improve performance by improving the probability of the
< * item being in the cache when it is reused.
< */
< if (kbp->kb_next == NULL) {
< kbp->kb_next = addr;
< kbp->kb_last = addr;
< freep->next = NULL;
< } else {
< freep->next = kbp->kb_next;
< kbp->kb_next = addr;
< }
< #endif
413c245
< mtx_unlock(&malloc_mtx);
---
> /* mtx_unlock(&malloc_mtx); XXX */
426c258
< struct kmemusage *kup;
---
> uma_slab_t slab;
433a266,268
> slab = hash_sfind(mallochash,
> (void *)((u_long)addr & ~(UMA_SLAB_MASK)));
>
435c270
< KASSERT(kmembase <= (char *)addr && (char *)addr < kmemlimit,
---
> KASSERT(slab != NULL,
439,442c274,277
< kup = btokup(addr);
< alloc = 1 << kup->ku_indx;
< if (alloc > MAXALLOCSAVE)
< alloc = kup->ku_pagecnt << PAGE_SHIFT;
---
> if (slab->us_zone)
> alloc = slab->us_zone->uz_size;
> else
> alloc = slab->us_size;
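Note on the realloc()/reallocf() hunk above: with the per-page kmemusage records gone, the true size of the existing block is recovered from its slab, either the zone's item size or the recorded large-allocation size. The surrounding, unchanged code uses that value to decide whether the old block can simply be reused and, if not, how many bytes to copy into the new one. A hypothetical userland analogue of that decision follows; grow(), oldsize, and newsize are illustrative names, not kernel code.

    #include <stdlib.h>
    #include <string.h>

    /*
     * Hypothetical analogue: given the real size of the old block (what the
     * slab lookup recovers in the kernel), reuse it when it is still large
     * enough, otherwise copy the old contents into a fresh allocation.
     */
    static void *
    grow(void *old, size_t oldsize, size_t newsize)
    {
            void *new;

            if (newsize <= oldsize)
                    return (old);
            if ((new = malloc(newsize)) == NULL)
                    return (NULL);
            memcpy(new, old, oldsize);
            free(old);
            return (new);
    }

    int
    main(void)
    {
            char *p = calloc(1, 16);

            p = grow(p, 16, 64);
            free(p);
            return (0);
    }
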
486a322,326
> void *hashmem;
> u_long hashsize;
> int highbit;
> int bits;
> int i;
488,497d327
< #if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
< #error "kmeminit: MAXALLOCSAVE not power of 2"
< #endif
< #if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
< #error "kmeminit: MAXALLOCSAVE too big"
< #endif
< #if (MAXALLOCSAVE < PAGE_SIZE)
< #error "kmeminit: MAXALLOCSAVE too small"
< #endif
<
547,548d376
< kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
< (vm_size_t)(npg * sizeof(struct kmemusage)));
552,557c380,406
< for (indx = 0; indx < MINBUCKET + 16; indx++) {
< if (1 << indx >= PAGE_SIZE)
< bucket[indx].kb_elmpercl = 1;
< else
< bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
< bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
---
>
> hashsize = npg * sizeof(void *);
>
> highbit = 0;
> bits = 0;
> /* The hash size must be a power of two */
> for (i = 0; i < 8 * sizeof(hashsize); i++)
> if (hashsize & (1 << i)) {
> highbit = i;
> bits++;
> }
> if (bits > 1)
> hashsize = 1 << (highbit);
>
> hashmem = (void *)kmem_alloc(kernel_map, (vm_size_t)hashsize);
> uma_startup2(hashmem, hashsize / sizeof(void *));
>
> for (i = 0, indx = 0; kmemsizes[indx].size != 0; indx++) {
> uma_zone_t zone;
> int size = kmemsizes[indx].size;
> char *name = kmemsizes[indx].name;
>
> zone = uma_zcreate(name, size, NULL, NULL, NULL, NULL,
> UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
> for (;i <= size; i+= KMEM_ZBASE)
> kmemzones[i >> KMEM_ZSHIFT] = zone;
>
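Note on the kmeminit() changes above: the old kmemusage array is replaced by a hash of slab pointers handed to uma_startup2(), and that hash must hold a power-of-two number of entries, so the bit-scanning loop truncates hashsize down to its highest set bit whenever more than one bit is set. The second loop creates one UMA zone per row of kmemsizes[] and makes every 16-byte request size up to that zone's item size resolve to it through kmemzones[]. A standalone sketch of the rounding step alone, assuming a nonzero hashsize:

    #include <stdio.h>

    /* Standalone sketch of the power-of-two rounding used for the malloc hash. */
    static unsigned long
    round_down_pow2(unsigned long hashsize)
    {
            unsigned int i, highbit = 0, bits = 0;

            for (i = 0; i < 8 * sizeof(hashsize); i++)
                    if (hashsize & (1UL << i)) {
                            highbit = i;
                            bits++;
                    }
            if (bits > 1)
                    hashsize = 1UL << highbit;
            return (hashsize);
    }

    int
    main(void)
    {
            /* 24576 is not a power of two; it is truncated to 16384. */
            printf("%lu\n", round_down_pow2(24576UL));
            return (0);
    }
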
591,596d439
< #ifdef INVARIANTS
< struct kmembuckets *kbp;
< struct freelist *freep;
< long indx;
< int s;
< #endif
607,626d449
< #ifdef INVARIANTS
< s = splmem();
< mtx_lock(&malloc_mtx);
< for (indx = 0; indx < MINBUCKET + 16; indx++) {
< kbp = bucket + indx;
< freep = (struct freelist*)kbp->kb_next;
< while (freep) {
< if (freep->type == type)
< freep->type = M_FREE;
< freep = (struct freelist*)freep->next;
< }
< }
< splx(s);
< mtx_unlock(&malloc_mtx);
<
< if (type->ks_memuse != 0)
< printf("malloc_uninit: %ld bytes of '%s' still allocated\n",
< type->ks_memuse, type->ks_shortdesc);
< #endif
<