Lines Matching defs:objects

4  * objects in per cpu and per node lists.
73 * A. slab->freelist -> List of free objects in a slab
74 * B. slab->inuse -> Number of objects in use
75 * C. slab->objects -> Number of objects in slab
84 * processors may put objects onto the freelist but the processor that
85 * froze the slab is the only one that can retrieve the objects from the
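
The fields named in A/B/C above, together with the frozen state described at lines 84-85, sit in one packed counters word so they can be updated as a group. A minimal userspace sketch of that layout, assuming the usual inuse:16/objects:15/frozen:1 split (the real struct slab carries considerably more members and unions, e.g. to allow a double-word cmpxchg of freelist plus counters):

#include <stdio.h>

/* Simplified sketch only: models the bookkeeping fields referenced in the
 * excerpts above, not the kernel's actual struct slab. */
struct slab_sketch {
	void *freelist;                 /* A: first free object in the slab */
	union {
		unsigned long counters;     /* all three fields updated as one word */
		struct {
			unsigned inuse:16;      /* B: number of objects in use */
			unsigned objects:15;    /* C: total number of objects in the slab */
			unsigned frozen:1;      /* slab owned by one CPU; other CPUs may
			                         * free onto ->freelist, but only the owner
			                         * takes objects off it (lines 84-85) */
		};
	};
};

int main(void)
{
	struct slab_sketch s = { .objects = 32767, .inuse = 12, .frozen = 1 };
	int free_objects = (int)s.objects - (int)s.inuse;

	printf("objects=%u inuse=%u free=%d\n",
	       (unsigned)s.objects, (unsigned)s.inuse, free_objects);
	return 0;
}
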
113 * allocating a long series of objects that fill up slabs does not require
158 * cannot scan all objects.
177 * free objects in addition to the regular freelist
277 * sort the partial list by the number of objects in use.
305 #define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */
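
That 32767 limit is just arithmetic on the field width from the sketch above: a 15-bit objects field can hold at most 2^15 - 1 = 32767, so no slab may ever be sized to carry more objects than that.
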
367 DEACTIVATE_REMOTE_FREES, /* Slab contained remotely freed objects */
423 * The slab lists for all objects.
580 /* Loop over all objects in a slab */
619 * We take the number of objects but actually limit the number of
769 bitmap_zero(obj_map, slab->objects);
855 if (object < base || object >= base + slab->objects * s->size ||
967 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
968 slab, slab->objects, slab->inuse, slab->freelist,
1219 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
1220 * 0xcc (RED_ACTIVE) for objects in use.
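
Lines 1219-1220 describe the red-zone convention used by the debug checks: the guard bytes around an object hold 0xbb while the object is free and 0xcc while it is allocated, so an out-of-bounds write flips a recognizable pattern. A small standalone illustration of the idea, with hypothetical helper names (the kernel's own routines, such as init_object() and check_bytes_and_report(), do considerably more):

#include <stdio.h>
#include <string.h>

#define RED_INACTIVE 0xbb      /* pattern while the object is free */
#define RED_ACTIVE   0xcc      /* pattern while the object is in use */
#define OBJ_SIZE     32
#define REDZONE_LEN  8

static void set_redzone(unsigned char *rz, unsigned char val)
{
	memset(rz, val, REDZONE_LEN);
}

static int check_redzone(const unsigned char *rz, unsigned char val)
{
	for (int i = 0; i < REDZONE_LEN; i++) {
		if (rz[i] != val) {
			printf("redzone overwritten at +%d: 0x%02x != 0x%02x\n",
			       i, (unsigned)rz[i], (unsigned)val);
			return 0;
		}
	}
	return 1;
}

int main(void)
{
	unsigned char object[OBJ_SIZE + REDZONE_LEN];

	set_redzone(object + OBJ_SIZE, RED_ACTIVE);    /* object handed to a user */
	object[OBJ_SIZE] = 0;                          /* simulate a one-byte overflow */
	check_redzone(object + OBJ_SIZE, RED_ACTIVE);  /* reports the corrupted byte */

	set_redzone(object + OBJ_SIZE, RED_INACTIVE);  /* object back on the freelist */
	return 0;
}
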
1369 * of the free objects in this slab. May cause
1388 if (slab->objects > maxobj) {
1389 slab_err(s, slab, "objects %u > max %u",
1390 slab->objects, maxobj);
1393 if (slab->inuse > slab->objects) {
1395 slab->inuse, slab->objects);
1415 while (fp && nr <= slab->objects) {
1426 slab->inuse = slab->objects;
1441 if (slab->objects != max_objects) {
1442 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
1443 slab->objects, max_objects);
1444 slab->objects = max_objects;
1445 slab_fix(s, "Number of objects adjusted");
1447 if (slab->inuse != slab->objects - nr) {
1449 slab->inuse, slab->objects - nr);
1450 slab->inuse = slab->objects - nr;
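
Lines 1388-1450 are the sanity checks on a slab's counters: objects must not exceed the per-order maximum, inuse must not exceed objects, and the chain of free pointers is walked (capped at slab->objects so a cycle cannot hang the check) to verify that inuse equals objects minus the number of free entries, with slab_fix() repairing the counters when it does not. A compact userspace sketch of that walk, using made-up type and function names (the real free pointer is also offset into the object and may be hardened, which is ignored here):

#include <stdio.h>

struct fake_slab {
	void *freelist;
	unsigned int objects;
	unsigned int inuse;
};

/* Walk the free chain stored inside the objects, count the entries, and
 * cross-check against objects - inuse, repairing the counter on mismatch. */
static int check_free_count(struct fake_slab *slab)
{
	unsigned int nr = 0;
	void *fp = slab->freelist;

	/* The nr <= objects bound mirrors line 1415: a corrupted, cyclic
	 * chain terminates the walk instead of looping forever. */
	while (fp && nr <= slab->objects) {
		fp = *(void **)fp;          /* next free object, stored in the object */
		nr++;
	}

	if (nr > slab->objects) {
		fprintf(stderr, "freelist corrupted, marking all objects used\n");
		slab->freelist = NULL;
		slab->inuse = slab->objects;        /* as at lines 1426 and 1575-1576 */
		return 0;
	}

	if (slab->inuse != slab->objects - nr) {
		fprintf(stderr, "wrong inuse %u, should be %u\n",
			slab->inuse, slab->objects - nr);
		slab->inuse = slab->objects - nr;   /* lines 1447-1450 */
		return 0;
	}
	return 1;
}

union obj { void *next; unsigned char payload[16]; };

int main(void)
{
	static union obj mem[3];
	struct fake_slab slab = { .freelist = &mem[1], .objects = 3, .inuse = 1 };

	mem[1].next = &mem[2];     /* objects 1 and 2 are free and chained */
	mem[2].next = NULL;

	printf("consistent: %d\n", check_free_count(&slab));
	return 0;
}
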
1501 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1506 atomic_long_add(objects, &n->total_objects);
1508 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1513 atomic_long_sub(objects, &n->total_objects);
1572 * to avoid issues in the future. Marking all objects
1573 * as used avoids touching the remaining objects.
1575 slab_fix(s, "Marking all objects used");
1576 slab->inuse = slab->objects;
1859 int objects) {}
1861 int objects) {}
1898 struct slabobj_ext *vec, unsigned int objects)
1902 * objects with no tag reference. Mark all references in this
1908 for (i = 0; i < objects; i++)
1918 struct slabobj_ext *vec, unsigned int objects) {}
1933 unsigned int objects = objs_per_slab(s, slab);
1941 vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp,
1956 handle_failed_objexts_alloc(old_exts, vec, objects);
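
Lines 1898-1956 deal with the per-object extension vector: when a slab needs object extensions, an array with one struct slabobj_ext entry per object in the slab is allocated, and an object's entry is found purely from its index within the slab. A toy userspace illustration of that indexing, with invented names (in the kernel the extension entries hold things like memcg and allocation-tag references):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct slabobj_ext: one slot of side metadata per object. */
struct objext_sketch { void *owner_ref; };

/* An object's metadata slot is selected by its index in the slab, i.e. its
 * byte offset from the slab base divided by the object size. */
static unsigned int obj_index(const void *base, const void *obj, size_t size)
{
	return (unsigned int)(((uintptr_t)obj - (uintptr_t)base) / size);
}

int main(void)
{
	unsigned int objects = 8;      /* what objs_per_slab() would report */
	size_t size = 64;              /* object size, including any metadata */
	unsigned char *slab_base = calloc(objects, size);
	struct objext_sketch *vec = calloc(objects, sizeof(*vec)); /* kcalloc_node() analogue */

	if (!slab_base || !vec)
		return 1;

	unsigned char *obj = slab_base + 3 * size;   /* the fourth object */
	printf("object at offset %zu uses extension slot %u\n",
	       (size_t)(obj - slab_base), obj_index(slab_base, obj, size));

	free(vec);
	free(slab_base);
	return 0;
}
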
2005 * CONFIG_MEMCG_KMEM creates a vector of obj_cgroup objects conditionally
2037 int objects)
2050 for (i = 0; i < objects; i++) {
2083 int objects)
2118 int objects)
2129 __memcg_slab_free_hook(s, slab, p, objects, obj_exts);
2141 void **p, int objects)
2328 * If the target page allocation failed, the number of objects on the
2349 if (slab->objects < 2 || !s->random_seq)
2355 page_limit = slab->objects * s->size;
2363 for (idx = 1; idx < slab->objects; idx++) {
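
Lines 2328-2363 belong to freelist randomization: when the cache has a precomputed random sequence (s->random_seq), the initial free list of a new slab is linked in that shuffled order rather than address order, so consecutive allocations do not hand out neighbouring objects. A sketch of the linking step under that assumption, using an index permutation in place of the kernel's precomputed, size-scaled sequence:

#include <stdio.h>

union obj { union obj *next; unsigned char payload[32]; };

int main(void)
{
	enum { OBJECTS = 4 };
	static union obj slab[OBJECTS];
	/* Stand-in for s->random_seq: a permutation of object indexes. */
	unsigned int seq[OBJECTS] = { 2, 0, 3, 1 };

	union obj *freelist = &slab[seq[0]];
	union obj *cur = freelist;

	/* Mirrors the idx = 1 .. objects-1 linking loop at line 2363. */
	for (unsigned int idx = 1; idx < OBJECTS; idx++) {
		cur->next = &slab[seq[idx]];
		cur = cur->next;
	}
	cur->next = NULL;

	for (union obj *p = freelist; p; p = p->next)
		printf("next allocation returns object %td\n", p - slab);
	return 0;
}
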
2441 slab->objects = oo_objects(oo);
2461 for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
2513 for_each_object(p, s, slab_address(slab), slab->objects)
2525 dec_slabs_node(s, slab_nid(slab), slab->objects);
2600 if (slab->inuse == slab->objects) {
2636 if (slab->inuse == slab->objects)
2641 inc_slabs_node(s, nid, slab->objects);
2734 * returns node local objects. If the ratio is higher then kmalloc()
2735 * may return off node objects because partial slabs are obtained
2743 * with available objects.
2894 * Stage one: Count the objects on cpu's freelist as free_delta and
2904 * 'freelist_iter' is already corrupted. So isolate all objects
3221 * Check if the objects in a per cpu structure fit numa
3236 return slab->objects - slab->inuse;
3259 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
3289 slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
3331 x += slab->objects - slab->inuse;
3334 * For a long list, approximate the total count of objects in
3341 x += slab->objects - slab->inuse;
3346 x += slab->objects - slab->inuse;
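
Each of the additions at 3331-3346 uses the same identity as line 3236: the number of free objects in a slab is objects - inuse, so summing that difference over a node's partial list yields the node's free-object count. The comment fragment at 3334 indicates that for a very long partial list the walk is cut short and the remaining slabs are estimated rather than counted exactly.
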
3439 new.inuse = slab->objects;
3466 new.inuse = slab->objects;
3481 * Processing is still very fast if new objects have been freed to the
3569 * freelist is pointing to the list of objects to be used.
3570 * slab is pointing to the slab from which the objects are obtained.
3699 slab->inuse = slab->objects;
3702 inc_slabs_node(s, slab_nid(slab), slab->objects);
4219 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
4231 * Slow path handling. This may still be called frequently since objects
4362 * Bulk free of a freelist with several objects (all pointing to the
4363 * same slab) possible by specifying head and tail ptr, plus objects
4458 * to remove objects, whose reuse must be delayed.
4570 * This function progressively scans the array with free objects (with
4571 * a limited look ahead) and extracts objects belonging to the same
4573 * slab/objects. This can happen without any need for
4574 * synchronization, because the objects are owned by the running process.
4575 * The freelist is built up as a singly linked list in the objects.
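
The comment at 4570-4575 describes building a detached freelist during bulk free: the pointer array is scanned with a small look-ahead, objects that belong to the same slab are pulled out, and they are chained through their own free pointers so the whole batch needs only one synchronized hand-off to that slab. A loose userspace sketch of the grouping step, with invented names and a toy 4 KiB notion of "same slab" (the real code also keeps the skipped pointers for later passes instead of dropping them):

#include <stdio.h>
#include <stdint.h>

union obj { union obj *next; unsigned char payload[64]; };

/* Toy: treat the 4 KiB-aligned block containing an object as its "slab". */
static uintptr_t slab_of(const void *p)
{
	return (uintptr_t)p & ~(uintptr_t)0xfff;
}

struct detached {
	uintptr_t slab;
	union obj *head;
	unsigned int cnt;
};

static unsigned int build_batch(union obj **p, unsigned int size, struct detached *df)
{
	unsigned int lookahead = 3;

	df->slab = slab_of(p[0]);
	df->head = p[0];
	p[0]->next = NULL;            /* the freelist is built inside the objects */
	df->cnt = 1;

	for (unsigned int i = 1; i < size && lookahead; i++) {
		if (slab_of(p[i]) != df->slab) {
			lookahead--;          /* tolerate a few objects from other slabs */
			continue;
		}
		p[i]->next = df->head;    /* chain through the object itself */
		df->head = p[i];
		df->cnt++;
	}
	return df->cnt;
}

int main(void)
{
	static union obj slab_a[8] __attribute__((aligned(4096)));
	union obj *objs[3] = { &slab_a[0], &slab_a[1], &slab_a[2] };
	struct detached df;

	printf("batched %u objects for one slab\n", build_batch(objs, 3, &df));
	return 0;
}
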
4593 /* Handle kalloc'ed objects */
4641 * Internal bulk free of objects that were not initialised by the post alloc
4690 * Drain objects in the per cpu slab, while disabling local
4840 * order 0 does not cause fragmentation in the page allocator. Larger objects
4846 * number of objects is in one slab. Otherwise we may generate too much
4851 * number of objects in a slab as critical. If we reach slab_max_order then
4855 * Higher order allocations also allow the placement of more objects in a
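
As a rough illustration of the trade-off sketched in 4840-4855 (ignoring per-object metadata): with 4 KiB pages, a 700-byte object fits 5 times into an order-0 slab, wasting 596 bytes (about 15% of the page), while an order-1 slab fits 11 objects and wastes only 492 bytes (about 6%). A higher order therefore buys both more objects per slab and less internal waste, at the price of harder-to-satisfy page allocations.
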
5020 inc_slabs_node(kmem_cache_node, node, slab->objects);
5080 * cpu_partial determined the maximum number of objects kept in the
5089 * of objects, even though we now limit maximum number of pages, see
5162 * destructor, are poisoning the objects, or are
5200 * overwrites from earlier objects rather than let
5215 * offset 0. In order to align the objects we have to simply size
5238 * Determine the number of objects per slab
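
Line 5238 marks where the number of objects per slab is computed for a chosen order; the result travels as a packed (order, objects) pair that oo_objects() (seen later at 6411/6421) and oo_order() unpack. A small sketch of such an encoding, assuming the customary 16-bit split; treat the exact constants as illustrative rather than quoted:

#include <stdio.h>

/* Sketch in the spirit of kmem_cache_order_objects / oo_make() /
 * oo_order() / oo_objects(). PAGE_SZ and the split are assumptions. */
#define OO_SHIFT 16
#define OO_MASK  ((1u << OO_SHIFT) - 1)
#define PAGE_SZ  4096u

static unsigned int oo_make(unsigned int order, unsigned int size)
{
	unsigned int objects = (PAGE_SZ << order) / size;   /* objects per slab */

	return (order << OO_SHIFT) + objects;
}

int main(void)
{
	unsigned int oo = oo_make(1, 700);    /* order-1 slab of 700-byte objects */

	printf("order=%u objects=%u\n", oo >> OO_SHIFT, oo & OO_MASK);
	return 0;
}
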
5317 for_each_object(p, s, addr, slab->objects) {
5375 /* Attempt to free all objects */
5409 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
5487 * Rejects incorrectly sized objects and objects that are to be copied
5543 * being allocated from last increasing the chance that the last objects
5572 int free = slab->objects - slab->inuse;
5580 if (free == slab->objects) {
5584 dec_slabs_node(s, node, slab->objects);
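
In the shrink path at 5543-5584, free is again objects - inuse; a slab whose free count equals its object count holds no live objects at all, so it can be taken off the partial list and handed back to the page allocator, with dec_slabs_node() keeping the per-node totals straight. The comment fragment at 5543 suggests the remaining partial slabs are ordered so the emptiest ones are allocated from last, improving their odds of draining completely.
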
5890 return slab->objects;
5906 for_each_object(p, s, addr, slab->objects) {
5975 * Generate lists of code addresses where slabcache objects are allocated
6131 for_each_object(p, s, addr, slab->objects)
6145 SL_OBJECTS, /* Determine allocated objects not slabs */
6183 x = slab->objects;
6345 unsigned int objects;
6348 err = kstrtouint(buf, 10, &objects);
6351 if (objects && !kmem_cache_has_cpu_partial(s))
6354 slub_set_cpu_partial(s, objects);
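
Lines 6345-6354 are the store side of the cache's cpu_partial attribute (exposed under /sys/kernel/slab/ when SLUB sysfs support is built in): the written string is parsed as an unsigned integer, a non-zero value is rejected for caches that cannot keep per-CPU partial slabs, and an accepted value is converted into the per-CPU partial limit via slub_set_cpu_partial().
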
6394 int objects = 0;
6411 objects = (slabs * oo_objects(s->oo)) / 2;
6412 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
6421 objects = (slabs * oo_objects(s->oo)) / 2;
6423 cpu, objects, slabs);
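
The per-CPU partial statistics at 6394-6423 only track slab counts, so the object figure printed alongside them is an estimate: slabs * oo_objects(s->oo) / 2, i.e. each per-CPU partial slab is assumed to be roughly half full.
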
6484 SLAB_ATTR_RO(objects);