26c26
< * $FreeBSD: head/sys/vm/uma_core.c 102241 2002-08-21 23:39:52Z archie $
---
> * $FreeBSD: head/sys/vm/uma_core.c 103531 2002-09-18 08:26:30Z jeff $
148,155d147
< /*
< * This is the malloc hash table which is used to find the zone that a
< * malloc allocation came from. It is not currently resizeable. The
< * memory for the actual hash bucket is allocated in kmeminit.
< */
< struct uma_hash mhash;
< struct uma_hash *mallochash = &mhash;
<
286,292c278,282
< if ((zone->uz_flags & UMA_ZFLAG_OFFPAGE) &&
< !(zone->uz_flags & UMA_ZFLAG_MALLOC)) {
< if (zone->uz_pages / zone->uz_ppera
< >= zone->uz_hash.uh_hashsize) {
< struct uma_hash newhash;
< struct uma_hash oldhash;
< int ret;
---
> if (zone->uz_flags & UMA_ZFLAG_HASH &&
> zone->uz_pages / zone->uz_ppera >= zone->uz_hash.uh_hashsize) {
> struct uma_hash newhash;
> struct uma_hash oldhash;
> int ret;
294,300c284,300
< /*
< * This is so involved because allocating and freeing
< * while the zone lock is held will lead to deadlock.
< * I have to do everything in stages and check for
< * races.
< */
< newhash = zone->uz_hash;
---
> /*
> * This is so involved because allocating and freeing
> * while the zone lock is held will lead to deadlock.
> * I have to do everything in stages and check for
> * races.
> */
> newhash = zone->uz_hash;
> ZONE_UNLOCK(zone);
> ret = hash_alloc(&newhash);
> ZONE_LOCK(zone);
> if (ret) {
> if (hash_expand(&zone->uz_hash, &newhash)) {
> oldhash = zone->uz_hash;
> zone->uz_hash = newhash;
> } else
> oldhash = newhash;
>
302c302
< ret = hash_alloc(&newhash);
---
> hash_free(&oldhash);
304,314d303
< if (ret) {
< if (hash_expand(&zone->uz_hash, &newhash)) {
< oldhash = zone->uz_hash;
< zone->uz_hash = newhash;
< } else
< oldhash = newhash;
<
< ZONE_UNLOCK(zone);
< hash_free(&oldhash);
< ZONE_LOCK(zone);
< }
482,488c471,472
< if (mzone) {
< mtx_lock(&malloc_mtx);
< slab = hash_sfind(mallochash,
< (u_int8_t *)((unsigned long)item &
< (~UMA_SLAB_MASK)));
< mtx_unlock(&malloc_mtx);
< }
---
> if (mzone)
> slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
625,631c609
< if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
< mtx_lock(&malloc_mtx);
< UMA_HASH_REMOVE(mallochash, slab, slab->us_data);
< mtx_unlock(&malloc_mtx);
< }
< if (zone->uz_flags & UMA_ZFLAG_OFFPAGE &&
< !(zone->uz_flags & UMA_ZFLAG_MALLOC))
---
> if (zone->uz_flags & UMA_ZFLAG_HASH)
651c629,630
< if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) {
---
>
> if (zone->uz_flags & UMA_ZFLAG_OFFPAGE)
653c632,635
< }
---
> if (zone->uz_flags & UMA_ZFLAG_MALLOC)
> for (i = 0; i < zone->uz_ppera; i++)
> vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
> kmem_object);
735c717
< if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE)) {
---
> if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE))
737d718
< }
739,747c720,722
< if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
< #ifdef UMA_DEBUG
< printf("Inserting %p into malloc hash from slab %p\n",
< mem, slab);
< #endif
< mtx_lock(&malloc_mtx);
< UMA_HASH_INSERT(mallochash, slab, mem);
< mtx_unlock(&malloc_mtx);
< }
---
> if (zone->uz_flags & UMA_ZFLAG_MALLOC)
> for (i = 0; i < zone->uz_ppera; i++)
> vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
781,782c756
< if ((zone->uz_flags & (UMA_ZFLAG_OFFPAGE|UMA_ZFLAG_MALLOC)) ==
< UMA_ZFLAG_OFFPAGE)
---
> if (zone->uz_flags & UMA_ZFLAG_HASH)
938a913,914
> if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
> zone->uz_flags |= UMA_ZFLAG_HASH;
970a947,949
> if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
> zone->uz_flags |= UMA_ZFLAG_HASH;
>
1076,1078d1054
< } else {
< hash_alloc(&zone->uz_hash);
< zone->uz_pgoff = 0;
1080a1057,1059
> if (zone->uz_flags & UMA_ZFLAG_HASH)
> hash_alloc(&zone->uz_hash);
>
1256c1235
< uma_startup2(void *hashmem, u_long elems)
---
> uma_startup2(void)
1258,1261d1236
< bzero(hashmem, elems * sizeof(void *));
< mallochash->uh_slab_hash = hashmem;
< mallochash->uh_hashsize = elems;
< mallochash->uh_hashmask = elems - 1;
1806c1781
< if (zone->uz_flags & UMA_ZFLAG_OFFPAGE)
---
> if (zone->uz_flags & UMA_ZFLAG_HASH)
2003a1979
> vsetslab((vm_offset_t)mem, slab);
2007,2009d1982
< mtx_lock(&malloc_mtx);
< UMA_HASH_INSERT(mallochash, slab, mem);
< mtx_unlock(&malloc_mtx);
2021,2023c1994
< mtx_lock(&malloc_mtx);
< UMA_HASH_REMOVE(mallochash, slab, slab->us_data);
< mtx_unlock(&malloc_mtx);
---
> vsetobj((vm_offset_t)slab->us_data, kmem_object);