uma_core.c: revision 94159 (Deleted) vs. revision 94161 (Added)
1 /*
2 * Copyright (c) 2002, Jeffrey Roberson <jroberson@chesapeake.net>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 9 unchanged lines hidden ---

18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
1 /*
2 * Copyright (c) 2002, Jeffrey Roberson <jroberson@chesapeake.net>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 9 unchanged lines hidden ---

18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/vm/uma_core.c 94159 2002-04-08 02:42:55Z jeff $
26 * $FreeBSD: head/sys/vm/uma_core.c 94161 2002-04-08 04:48:58Z jeff $
27 *
28 */
29
30 /*
31 * uma_core.c Implementation of the Universal Memory allocator
32 *
33 * This allocator is intended to replace the multitude of similar object caches
34 * in the standard FreeBSD kernel. The intent is to be flexible as well as

--- 117 unchanged lines hidden ---

152 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
153 static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
154 static void page_free(void *, int, u_int8_t);
155 static uma_slab_t slab_zalloc(uma_zone_t, int);
156 static void cache_drain(uma_zone_t);
157 static void bucket_drain(uma_zone_t, uma_bucket_t);
158 static void zone_drain(uma_zone_t);
159 static void zone_ctor(void *, int, void *);
27 *
28 */
29
30 /*
31 * uma_core.c Implementation of the Universal Memory allocator
32 *
33 * This allocator is intended to replace the multitude of similar object caches
34 * in the standard FreeBSD kernel. The intent is to be flexible as well as

--- 117 unchanged lines hidden ---

152 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
153 static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
154 static void page_free(void *, int, u_int8_t);
155 static uma_slab_t slab_zalloc(uma_zone_t, int);
156 static void cache_drain(uma_zone_t);
157 static void bucket_drain(uma_zone_t, uma_bucket_t);
158 static void zone_drain(uma_zone_t);
159 static void zone_ctor(void *, int, void *);
160 static void zone_dtor(void *, int, void *);
160 static void zero_init(void *, int);
161 static void zone_small_init(uma_zone_t zone);
162 static void zone_large_init(uma_zone_t zone);
163 static void zone_foreach(void (*zfunc)(uma_zone_t));
164 static void zone_timeout(uma_zone_t zone);
165 static void hash_expand(struct uma_hash *);
161 static void zero_init(void *, int);
162 static void zone_small_init(uma_zone_t zone);
163 static void zone_large_init(uma_zone_t zone);
164 static void zone_foreach(void (*zfunc)(uma_zone_t));
165 static void zone_timeout(uma_zone_t zone);
166 static void hash_expand(struct uma_hash *);
167 static void hash_free(struct uma_hash *hash);
166 static void uma_timeout(void *);
167 static void uma_startup3(void);
168 static void *uma_zalloc_internal(uma_zone_t, void *, int, uma_bucket_t);
169 static void uma_zfree_internal(uma_zone_t,
170 void *, void *, int);
171 void uma_print_zone(uma_zone_t);
172 void uma_print_stats(void);
173 static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);

--- 123 unchanged lines hidden ---

297 * Discussion:
298 */
299 static void
300 hash_expand(struct uma_hash *hash)
301 {
302 struct slabhead *newhash;
303 struct slabhead *oldhash;
304 uma_slab_t slab;
168 static void uma_timeout(void *);
169 static void uma_startup3(void);
170 static void *uma_zalloc_internal(uma_zone_t, void *, int, uma_bucket_t);
171 static void uma_zfree_internal(uma_zone_t,
172 void *, void *, int);
173 void uma_print_zone(uma_zone_t);
174 void uma_print_stats(void);
175 static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);

--- 123 unchanged lines hidden ---

299 * Discussion:
300 */
301 static void
302 hash_expand(struct uma_hash *hash)
303 {
304 struct slabhead *newhash;
305 struct slabhead *oldhash;
306 uma_slab_t slab;
305 int hzonefree;
306 int hashsize;
307 int oldsize;
308 int newsize;
307 int alloc;
308 int hval;
309 int i;
310
311
312 /*
313 * Remember the old hash size and see if it has to go back to the
314 * hash zone, or malloc. The hash zone is used for the initial hash
315 */
316
309 int alloc;
310 int hval;
311 int i;
312
313
314 /*
315 * Remember the old hash size and see if it has to go back to the
316 * hash zone, or malloc. The hash zone is used for the initial hash
317 */
318
317 hashsize = hash->uh_hashsize;
319 oldsize = hash->uh_hashsize;
318 oldhash = hash->uh_slab_hash;
319
320 oldhash = hash->uh_slab_hash;
321
320 if (hashsize == UMA_HASH_SIZE_INIT)
321 hzonefree = 1;
322 else
323 hzonefree = 0;
324
325
326 /* We're just going to go to a power of two greater */
327 if (hash->uh_hashsize) {
322 /* We're just going to go to a power of two greater */
323 if (hash->uh_hashsize) {
328 alloc = sizeof(hash->uh_slab_hash[0]) * (hash->uh_hashsize * 2);
324 newsize = oldsize * 2;
325 alloc = sizeof(hash->uh_slab_hash[0]) * newsize;
329 /* XXX Shouldn't be abusing DEVBUF here */
330 newhash = (struct slabhead *)malloc(alloc, M_DEVBUF, M_NOWAIT);
331 if (newhash == NULL) {
332 return;
333 }
326 /* XXX Shouldn't be abusing DEVBUF here */
327 newhash = (struct slabhead *)malloc(alloc, M_DEVBUF, M_NOWAIT);
328 if (newhash == NULL) {
329 return;
330 }
334 hash->uh_hashsize *= 2;
335 } else {
336 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
337 newhash = uma_zalloc_internal(hashzone, NULL, M_WAITOK, NULL);
331 } else {
332 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
333 newhash = uma_zalloc_internal(hashzone, NULL, M_WAITOK, NULL);
338 hash->uh_hashsize = UMA_HASH_SIZE_INIT;
334 newsize = UMA_HASH_SIZE_INIT;
339 }
340
341 bzero(newhash, alloc);
342
335 }
336
337 bzero(newhash, alloc);
338
343 hash->uh_hashmask = hash->uh_hashsize - 1;
339 hash->uh_hashmask = newsize - 1;
344
345 /*
346 * I need to investigate hash algorithms for resizing without a
347 * full rehash.
348 */
349
340
341 /*
342 * I need to investigate hash algorithms for resizing without a
343 * full rehash.
344 */
345
350 for (i = 0; i < hashsize; i++)
346 for (i = 0; i < oldsize; i++)
351 while (!SLIST_EMPTY(&hash->uh_slab_hash[i])) {
352 slab = SLIST_FIRST(&hash->uh_slab_hash[i]);
353 SLIST_REMOVE_HEAD(&hash->uh_slab_hash[i], us_hlink);
354 hval = UMA_HASH(hash, slab->us_data);
355 SLIST_INSERT_HEAD(&newhash[hval], slab, us_hlink);
356 }
357
347 while (!SLIST_EMPTY(&hash->uh_slab_hash[i])) {
348 slab = SLIST_FIRST(&hash->uh_slab_hash[i]);
349 SLIST_REMOVE_HEAD(&hash->uh_slab_hash[i], us_hlink);
350 hval = UMA_HASH(hash, slab->us_data);
351 SLIST_INSERT_HEAD(&newhash[hval], slab, us_hlink);
352 }
353
358 if (hash->uh_slab_hash) {
359 if (hzonefree)
360 uma_zfree_internal(hashzone,
361 hash->uh_slab_hash, NULL, 0);
362 else
363 free(hash->uh_slab_hash, M_DEVBUF);
364 }
354 if (oldhash)
355 hash_free(hash);
356
365 hash->uh_slab_hash = newhash;
357 hash->uh_slab_hash = newhash;
358 hash->uh_hashsize = newsize;
366
367 return;
368 }
369
359
360 return;
361 }
362
363 static void
364 hash_free(struct uma_hash *hash)
365 {
366 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
367 uma_zfree_internal(hashzone,
368 hash->uh_slab_hash, NULL, 0);
369 else
370 free(hash->uh_slab_hash, M_DEVBUF);
371
372 hash->uh_slab_hash = NULL;
373 }
374
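
Editor's note: the new hash_expand()/hash_free() pair above follows a classic doubling rehash: allocate and zero a bucket array twice the old size, update the mask, pull each slab off its old chain and push it onto the chain the new mask selects, then free the old array. The stand-alone C sketch below shows the same pattern with simplified stand-in types and a made-up hash function; it is an illustration only, not the kernel code above.

#include <stdint.h>
#include <stdlib.h>

struct entry {
	struct entry *next;		/* singly linked bucket chain, like us_hlink */
	void *key;			/* stands in for slab->us_data */
};

struct table {
	struct entry **buckets;
	size_t size;			/* always a power of two */
	size_t mask;			/* size - 1, like uh_hashmask */
};

/* Stand-in for UMA_HASH(); any function of the key will do here. */
static size_t
hash_key(const struct table *t, void *key)
{
	return (((uintptr_t)key >> 4) & t->mask);
}

/* Double the table and rehash every entry, mirroring hash_expand(). */
static int
table_expand(struct table *t)
{
	struct entry **newbuckets, *e;
	size_t newsize, hval, i;

	newsize = t->size * 2;
	newbuckets = calloc(newsize, sizeof(*newbuckets));
	if (newbuckets == NULL)
		return (-1);		/* keep the old table on failure */

	t->mask = newsize - 1;		/* new mask before rehashing */
	for (i = 0; i < t->size; i++) {
		while ((e = t->buckets[i]) != NULL) {
			t->buckets[i] = e->next;	/* unlink from old chain */
			hval = hash_key(t, e->key);
			e->next = newbuckets[hval];	/* push onto new chain */
			newbuckets[hval] = e;
		}
	}
	free(t->buckets);		/* the hash_free() analogue */
	t->buckets = newbuckets;
	t->size = newsize;
	return (0);
}
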
370 /*
371 * Frees all outstanding items in a bucket
372 *
373 * Arguments:
374 * zone The zone to free to, must be unlocked.
375 * bucket The free/alloc bucket with items, cpu queue must be locked.
376 *
377 * Returns:

--- 110 unchanged lines hidden ---

488 }
489
490 /*
491 * Frees pages from a zone back to the system. This is done on demand from
492 * the pageout daemon.
493 *
494 * Arguments:
495 * zone The zone to free pages from
375 /*
376 * Frees all outstanding items in a bucket
377 *
378 * Arguments:
379 * zone The zone to free to, must be unlocked.
380 * bucket The free/alloc bucket with items, cpu queue must be locked.
381 *
382 * Returns:

--- 110 unchanged lines hidden ---

493 }
494
495 /*
496 * Frees pages from a zone back to the system. This is done on demand from
497 * the pageout daemon.
498 *
499 * Arguments:
500 * zone The zone to free pages from
501 * all Should we drain all items?
496 *
497 * Returns:
498 * Nothing.
499 */
500 static void
501 zone_drain(uma_zone_t zone)
502 {
503 uma_slab_t slab;

--- 16 unchanged lines hidden ---

520 cache_drain(zone);
521
522 if (zone->uz_free < zone->uz_wssize)
523 goto finished;
524 #ifdef UMA_DEBUG
525 printf("%s working set size: %llu free items: %u\n",
526 zone->uz_name, (unsigned long long)zone->uz_wssize, zone->uz_free);
527 #endif
502 *
503 * Returns:
504 * Nothing.
505 */
506 static void
507 zone_drain(uma_zone_t zone)
508 {
509 uma_slab_t slab;

--- 16 unchanged lines hidden ---

526 cache_drain(zone);
527
528 if (zone->uz_free < zone->uz_wssize)
529 goto finished;
530 #ifdef UMA_DEBUG
531 printf("%s working set size: %llu free items: %u\n",
532 zone->uz_name, (unsigned long long)zone->uz_wssize, zone->uz_free);
533 #endif
528 extra = zone->uz_wssize - zone->uz_free;
534 extra = zone->uz_free - zone->uz_wssize;
529 extra /= zone->uz_ipers;
530
531 /* extra is now the number of extra slabs that we can free */
532
533 if (extra == 0)
534 goto finished;
535
536 slab = LIST_FIRST(&zone->uz_free_slab);

--- 471 unchanged lines hidden ---

1008 zone->uz_count = zone->uz_ipers - 1;
1009 else
1010 zone->uz_count = UMA_BUCKET_SIZE - 1;
1011
1012 for (cpu = 0; cpu < maxcpu; cpu++)
1013 CPU_LOCK_INIT(zone, cpu);
1014 }
1015
535 extra /= zone->uz_ipers;
536
537 /* extra is now the number of extra slabs that we can free */
538
539 if (extra == 0)
540 goto finished;
541
542 slab = LIST_FIRST(&zone->uz_free_slab);

--- 471 unchanged lines hidden ---
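
Editor's note: the only functional change in zone_drain() here is the direction of that subtraction: the number of spare items is the free count minus the working-set size, not the reverse. A small stand-alone example with made-up numbers (the variable names merely mirror uz_free, uz_wssize and uz_ipers):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t wssize = 40;		/* working-set size: items to keep cached */
	uint64_t free_items = 100;	/* items currently free in the zone */
	uint64_t ipers = 10;		/* items per slab */
	uint64_t extra;

	/* Revision 94161: spare items = free - working set, then whole slabs. */
	extra = (free_items - wssize) / ipers;
	printf("%llu slabs can be freed\n", (unsigned long long)extra);	/* prints 6 */

	/*
	 * The deleted line computed wssize - free_items.  Once the earlier
	 * "uz_free < uz_wssize" test has passed, free >= wssize, so that
	 * order yields zero or, with unsigned counters (as the %llu/%u
	 * printf above suggests), wraps around to a huge value.
	 */
	return (0);
}
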

1014 zone->uz_count = zone->uz_ipers - 1;
1015 else
1016 zone->uz_count = UMA_BUCKET_SIZE - 1;
1017
1018 for (cpu = 0; cpu < maxcpu; cpu++)
1019 CPU_LOCK_INIT(zone, cpu);
1020 }
1021
1022 /*
1023 * Zone header dtor. This frees all data, destroys locks, frees the hash table
1024 * and removes the zone from the global list.
1025 *
1026 * Arguments/Returns follow uma_dtor specifications
1027 * udata unused
1028 */
1029
1030 static void
1031 zone_dtor(void *arg, int size, void *udata)
1032 {
1033 uma_zone_t zone;
1034 int cpu;
1035
1036 zone = (uma_zone_t)arg;
1037
1038 mtx_lock(&uma_mtx);
1039 LIST_REMOVE(zone, uz_link);
1040 mtx_unlock(&uma_mtx);
1041
1042 ZONE_LOCK(zone);
1043 zone->uz_wssize = 0;
1044 ZONE_UNLOCK(zone);
1045
1046 zone_drain(zone);
1047 ZONE_LOCK(zone);
1048 if (zone->uz_free != 0)
1049 printf("Zone %s was not empty. Lost %d pages of memory.\n",
1050 zone->uz_name, zone->uz_pages);
1051
1052 if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) != 0)
1053 for (cpu = 0; cpu < maxcpu; cpu++)
1054 CPU_LOCK_FINI(zone, cpu);
1055
1056 if ((zone->uz_flags & UMA_ZFLAG_OFFPAGE) != 0)
1057 hash_free(&zone->uz_hash);
1058
1059 ZONE_UNLOCK(zone);
1060 ZONE_LOCK_FINI(zone);
1061 }
1016 /*
1017 * Traverses every zone in the system and calls a callback
1018 *
1019 * Arguments:
1020 * zfunc A pointer to a function which accepts a zone
1021 * as an argument.
1022 *
1023 * Returns:

--- 34 unchanged lines hidden ---

1058 Debugger("stop");
1059 #endif
1060 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
1061 /* "manually" Create the initial zone */
1062 args.name = "UMA Zones";
1063 args.size = sizeof(struct uma_zone) +
1064 (sizeof(struct uma_cache) * (maxcpu - 1));
1065 args.ctor = zone_ctor;
1062 /*
1063 * Traverses every zone in the system and calls a callback
1064 *
1065 * Arguments:
1066 * zfunc A pointer to a function which accepts a zone
1067 * as an argument.
1068 *
1069 * Returns:

--- 34 unchanged lines hidden ---

1104 Debugger("stop");
1105 #endif
1106 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
1107 /* "manually" Create the initial zone */
1108 args.name = "UMA Zones";
1109 args.size = sizeof(struct uma_zone) +
1110 (sizeof(struct uma_cache) * (maxcpu - 1));
1111 args.ctor = zone_ctor;
1066 args.dtor = NULL;
1112 args.dtor = zone_dtor;
1067 args.uminit = zero_init;
1068 args.fini = NULL;
1069 args.align = 32 - 1;
1070 args.flags = UMA_ZONE_INTERNAL;
1071 /* The initial zone has no Per cpu queues so it's smaller */
1072 zone_ctor(zones, sizeof(struct uma_zone), &args);
1073
1074 #ifdef UMA_DEBUG

--- 92 unchanged lines hidden ---

1167 args.fini = fini;
1168 args.align = align;
1169 args.flags = flags;
1170
1171 return (uma_zalloc_internal(zones, &args, M_WAITOK, NULL));
1172 }
1173
1174 /* See uma.h */
1113 args.uminit = zero_init;
1114 args.fini = NULL;
1115 args.align = 32 - 1;
1116 args.flags = UMA_ZONE_INTERNAL;
1117 /* The initial zone has no Per cpu queues so it's smaller */
1118 zone_ctor(zones, sizeof(struct uma_zone), &args);
1119
1120 #ifdef UMA_DEBUG

--- 92 unchanged lines hidden ---

1213 args.fini = fini;
1214 args.align = align;
1215 args.flags = flags;
1216
1217 return (uma_zalloc_internal(zones, &args, M_WAITOK, NULL));
1218 }
1219
1220 /* See uma.h */
1221 void
1222 uma_zdestroy(uma_zone_t zone)
1223 {
1224 uma_zfree_internal(zones, zone, NULL, 0);
1225 }
1226
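
Editor's note: with uma_zdestroy() exported, a zone created by uma_zcreate() can now be torn down; zone_dtor() above drains it and warns if items are still outstanding. A hypothetical consumer might look like the sketch below. The uma_zcreate() argument order is inferred from the args.* assignments shown earlier in this diff, so treat it as an assumption and check uma.h for the real prototype.

#include <vm/uma.h>

struct foo {
	int	f_value;
};

static uma_zone_t foo_zone;

static void
foo_modinit(void)
{
	/* No ctor/dtor/init/fini callbacks, no special alignment, no flags. */
	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    NULL, NULL, NULL, NULL, 0, 0);
}

static void
foo_moduninit(void)
{
	/* New in 94161: release the zone itself when the consumer goes away. */
	uma_zdestroy(foo_zone);
}
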
1227 /* See uma.h */
1175 void *
1176 uma_zalloc_arg(uma_zone_t zone, void *udata, int wait)
1177 {
1178 void *item;
1179 uma_cache_t cache;
1180 uma_bucket_t bucket;
1181 int cpu;
1182

--- 706 unchanged lines hidden ---
1228 void *
1229 uma_zalloc_arg(uma_zone_t zone, void *udata, int wait)
1230 {
1231 void *item;
1232 uma_cache_t cache;
1233 uma_bucket_t bucket;
1234 int cpu;
1235

--- 706 unchanged lines hidden ---