1/* 2 * Copyright (c) 2002, Jeffrey Roberson <jroberson@chesapeake.net> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright --- 9 unchanged lines hidden (view full) --- 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * |
26 * $FreeBSD: head/sys/vm/uma_core.c 95766 2002-04-30 04:26:34Z jeff $ |
27 * 28 */ 29 30/* 31 * uma_core.c Implementation of the Universal Memory allocator 32 * 33 * This allocator is intended to replace the multitude of similar object caches 34 * in the standard FreeBSD kernel. The intent is to be flexible as well as --- 1250 unchanged lines hidden (view full) --- 1285void 1286uma_zdestroy(uma_zone_t zone) 1287{ 1288 uma_zfree_internal(zones, zone, NULL, 0); 1289} 1290 1291/* See uma.h */ 1292void * |
1293uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) |
1294{ 1295 void *item; 1296 uma_cache_t cache; 1297 uma_bucket_t bucket; 1298 int cpu; 1299 1300 /* This is the fast path allocation */ 1301#ifdef UMA_DEBUG_ALLOC_1 --- 16 unchanged lines hidden (view full) --- 1318#endif 1319 bucket->ub_ptr--; 1320 KASSERT(item != NULL, 1321 ("uma_zalloc: Bucket pointer mangled.")); 1322 cache->uc_allocs++; 1323 CPU_UNLOCK(zone, cpu); 1324 if (zone->uz_ctor) 1325 zone->uz_ctor(item, zone->uz_size, udata); |
1326 if (flags & M_ZERO) 1327 bzero(item, zone->uz_size); |
1328 return (item); 1329 } else if (cache->uc_freebucket) { 1330 /* 1331 * We have run out of items in our allocbucket. 1332 * See if we can switch with our free bucket. 1333 */ 1334 if (cache->uc_freebucket->ub_ptr > -1) { 1335 uma_bucket_t swap; --- 50 unchanged lines hidden (view full) --- 1386 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) 1387 LIST_REMOVE(bucket, ub_link); 1388 1389 /* Now we no longer need the zone lock. */ 1390 ZONE_UNLOCK(zone); 1391 1392 if (bucket == NULL) 1393 bucket = uma_zalloc_internal(bucketzone, |
1394 NULL, flags, NULL); |
1395 1396 if (bucket != NULL) { 1397#ifdef INVARIANTS 1398 bzero(bucket, bucketzone->uz_size); 1399#endif 1400 bucket->ub_ptr = -1; 1401 |
1402 if (uma_zalloc_internal(zone, udata, flags, bucket)) |
1403 goto zalloc_restart; 1404 else 1405 uma_zfree_internal(bucketzone, bucket, NULL, 0); 1406 } 1407 /* 1408 * We may not get a bucket if we recurse, so 1409 * return an actual item. 1410 */ 1411#ifdef UMA_DEBUG 1412 printf("uma_zalloc_arg: Bucketzone returned NULL\n"); 1413#endif 1414 |
1415 return (uma_zalloc_internal(zone, udata, flags, NULL)); |
1416} 1417 1418/* 1419 * Allocates an item for an internal zone OR fills a bucket 1420 * 1421 * Arguments 1422 * zone The zone to alloc for. 1423 * udata The data to be passed to the constructor. |
1424 * flags M_WAITOK, M_NOWAIT, M_ZERO. |
 1425 * bucket The bucket to fill or NULL 1426 * 1427 * Returns 1428 * NULL if there is no memory and M_NOWAIT is set 1429 * An item if called on an internal zone 1430 * Non NULL if called to fill a bucket and it was successful. 1431 * 1432 * Discussion: 1433 * This was much cleaner before it had to do per cpu caches. It is 1434 * complicated now because it has to handle the simple internal case, and 1435 * the more involved bucket filling and allocation. 1436 */ 1437 1438static void * |
1439uma_zalloc_internal(uma_zone_t zone, void *udata, int flags, uma_bucket_t bucket) |
1440{ 1441 uma_slab_t slab; 1442 u_int8_t freei; 1443 void *item; 1444 1445 item = NULL; 1446 1447 /* --- 52 unchanged lines hidden (view full) --- 1500 if (zone == bucketzone && zone->uz_recurse != 0) { 1501 ZONE_UNLOCK(zone); 1502 return (NULL); 1503 } 1504 while (zone->uz_maxpages && 1505 zone->uz_pages >= zone->uz_maxpages) { 1506 zone->uz_flags |= UMA_ZFLAG_FULL; 1507 |
1508 if (flags & M_WAITOK) |
1509 msleep(zone, &zone->uz_lock, PVM, "zonelimit", 0); 1510 else 1511 goto alloc_fail; 1512 1513 goto new_slab; 1514 } 1515 1516 zone->uz_recurse++; |
1517 slab = slab_zalloc(zone, flags); |
1518 zone->uz_recurse--; 1519 /* 1520 * We might not have been able to get a slab but another cpu 1521 * could have while we were unlocked. If we did get a slab put 1522 * it on the partially used slab list. If not check the free 1523 * count and restart or fail accordingly. 1524 */ 1525 if (slab) --- 36 unchanged lines hidden (view full) --- 1562 if (slab->us_freecount == 0) { 1563 LIST_REMOVE(slab, us_link); 1564 LIST_INSERT_HEAD(&zone->uz_full_slab, slab, us_link); 1565 } 1566 1567 if (bucket != NULL) { 1568 /* Try to keep the buckets totally full, but don't block */ 1569 if (bucket->ub_ptr < zone->uz_count) { |
1570 flags |= M_NOWAIT; 1571 flags &= ~M_WAITOK; |
1572 goto new_slab; 1573 } else 1574 zone->uz_fills--; 1575 } 1576 1577 ZONE_UNLOCK(zone); 1578 1579 /* Only construct at this time if we're not filling a bucket */ |
1580 if (bucket == NULL && zone->uz_ctor != NULL) { |
1581 zone->uz_ctor(item, zone->uz_size, udata); |
1582 if (flags & M_ZERO) 1583 bzero(item, zone->uz_size); 1584 } |
1585 1586 return (item); 1587 1588alloc_fail: 1589 if (bucket != NULL) 1590 zone->uz_fills--; 1591 ZONE_UNLOCK(zone); 1592 --- 455 unchanged lines hidden --- |