Lines Matching defs:objects

4  * objects in per cpu and per node lists.
73 * A. slab->freelist -> List of free objects in a slab
74 * B. slab->inuse -> Number of objects in use
75 * C. slab->objects -> Number of objects in slab
84 * processors may put objects onto the freelist but the processor that
85 * froze the slab is the only one that can retrieve the objects from the
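The fields named A, B and C above, plus the frozen bit, share one machine word next to the freelist pointer so that freelist and counters can be swapped together with a double-word cmpxchg. A simplified sketch of that layout (the real struct slab in mm/slab.h wraps these members in further unions; field widths follow the "slab.objects is u15" note at line 305; the struct name here is made up for illustration):

struct slab_counters_sketch {			/* hypothetical name */
	void *freelist;				/* A. first free object, NULL if none */
	union {
		unsigned long counters;		/* inuse/objects/frozen as one word */
		struct {
			unsigned inuse:16;	/* B. objects currently handed out */
			unsigned objects:15;	/* C. total objects in the slab; */
						/*    15 bits => MAX_OBJS_PER_PAGE 32767 */
			unsigned frozen:1;	/* slab is owned by one CPU's cache */
		};
	};
};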
113 * allocating a long series of objects that fill up slabs does not require
158 * cannot scan all objects.
177 * free objects in addition to the regular freelist
277 * sort the partial list by the number of objects in use.
305 #define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */
367 DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
423 * The slab lists for all objects.
580 /* Loop over all objects in a slab */
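The loop macro at line 580 just walks the slab in object-size strides. In recent kernels it reads roughly as below; fixup_red_left() moves the start past a leading red zone when SLAB_RED_ZONE debugging is active (treat this as a sketch, not the literal source):

/* Visit every object: start at the (red-zone adjusted) base address and
 * advance by the per-object size until the end of the slab's objects.
 */
#define for_each_object(__p, __s, __addr, __objects)			\
	for (__p = fixup_red_left(__s, __addr);				\
	     __p < (__addr) + (__objects) * (__s)->size;		\
	     __p += (__s)->size)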
619 * We take the number of objects but actually limit the number of
765 bitmap_zero(obj_map, slab->objects);
851 if (object < base || object >= base + slab->objects * s->size ||
963 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
964 slab, slab->objects, slab->inuse, slab->freelist,
1215 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
1216 * 0xcc (RED_ACTIVE) for objects in use.
1365 * of the free objects in this slab. May cause
1384 if (slab->objects > maxobj) {
1385 slab_err(s, slab, "objects %u > max %u",
1386 slab->objects, maxobj);
1389 if (slab->inuse > slab->objects) {
1391 slab->inuse, slab->objects);
1411 while (fp && nr <= slab->objects) {
1422 slab->inuse = slab->objects;
1437 if (slab->objects != max_objects) {
1438 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
1439 slab->objects, max_objects);
1440 slab->objects = max_objects;
1441 slab_fix(s, "Number of objects adjusted");
1443 if (slab->inuse != slab->objects - nr) {
1445 slab->inuse, slab->objects - nr);
1446 slab->inuse = slab->objects - nr;
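The debug checks at lines 1384-1446 share one pattern: walk the freelist counting free objects, never taking more steps than slab->objects so a corrupted (cyclic) list still terminates, then reconcile the counters against the walk. A condensed sketch of that walk with a hypothetical helper name; the real on_freelist() additionally validates every pointer and truncates the list on failure:

static int count_free_objects(struct kmem_cache *s, struct slab *slab)
{
	void *fp = slab->freelist;
	int nr = 0;

	/* Bound the walk by slab->objects so a looped freelist cannot hang us. */
	while (fp && nr <= slab->objects) {
		fp = get_freepointer(s, fp);
		nr++;
	}
	return nr;
}

If the count disagrees with slab->objects - slab->inuse, the code trusts the walk and rewrites inuse (line 1446), and it clamps objects back to the computed maximum (line 1440).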
1497 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1502 atomic_long_add(objects, &n->total_objects);
1504 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1509 atomic_long_sub(objects, &n->total_objects);
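The per-node bookkeeping at lines 1497-1509 is two atomic counters on struct kmem_cache_node; a sketch of the pair (the real inc_slabs_node() also tolerates a not-yet-allocated node structure during early boot, and the empty stubs at lines 1855/1857 are presumably the no-debug variants):

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/* One more slab and 'objects' more total objects on this node. */
	atomic_long_inc(&n->nr_slabs);
	atomic_long_add(objects, &n->total_objects);
}

static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}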
1568 * to avoid issues in the future. Marking all objects
1569 * as used avoids touching the remaining objects.
1571 slab_fix(s, "Marking all objects used");
1572 slab->inuse = slab->objects;
1855 int objects) {}
1857 int objects) {}
1896 size_t objects, gfp_t flags)
1919 if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
1931 struct obj_cgroup **objcgp, size_t objects,
1940 return likely(__memcg_slab_pre_alloc_hook(s, lru, objcgp, objects,
1987 void **p, int objects,
1990 for (int i = 0; i < objects; i++) {
2009 int objects)
2020 __memcg_slab_free_hook(s, slab, p, objects, objcgs);
2024 void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
2028 obj_cgroup_uncharge(objcg, objects * obj_full_size(s));
2038 size_t objects, gfp_t flags)
2051 void **p, int objects)
2056 void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
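The memcg hooks between lines 1896 and 2062 all account in units of objects * obj_full_size(s): the pre-allocation hook charges the whole batch up front (line 1919), and the free and allocation-error hooks return exactly that amount (line 2028). A minimal sketch of that symmetry, using hypothetical sketch_* names rather than the real hook bodies:

static bool sketch_pre_alloc_charge(struct kmem_cache *s,
				    struct obj_cgroup *objcg,
				    size_t objects, gfp_t flags)
{
	/* Charge for the whole batch before any object is handed out. */
	return obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)) == 0;
}

static void sketch_alloc_error_uncharge(struct kmem_cache *s,
					struct obj_cgroup *objcg,
					size_t objects)
{
	/* The batch failed after charging: give the whole charge back. */
	obj_cgroup_uncharge(objcg, objects * obj_full_size(s));
}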
2244 * If the target page allocation failed, the number of objects on the
2265 if (slab->objects < 2 || !s->random_seq)
2271 page_limit = slab->objects * s->size;
2279 for (idx = 1; idx < slab->objects; idx++) {
2357 slab->objects = oo_objects(oo);
2377 for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
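Once slab->objects has been set from oo_objects(oo) at line 2357, allocate_slab() threads the new objects into the initial freelist. Without freelist randomization the chain is simply linear (the loop at line 2377); with CONFIG_SLAB_FREELIST_RANDOM the indices come from s->random_seq instead (lines 2265-2279). A reduced sketch of the linear case, leaving out the setup_object()/red-zone handling and using a hypothetical function name:

static void sketch_init_freelist(struct kmem_cache *s, struct slab *slab,
				 void *start)
{
	void *p = start, *next;
	unsigned int idx;

	slab->freelist = start;
	for (idx = 0; idx < slab->objects - 1; idx++) {
		next = p + s->size;		/* objects are laid out back to back */
		set_freepointer(s, p, next);	/* store the link inside object p */
		p = next;
	}
	set_freepointer(s, p, NULL);		/* last object terminates the list */
	slab->inuse = 0;			/* nothing handed out yet */
}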
2429 for_each_object(p, s, slab_address(slab), slab->objects)
2441 dec_slabs_node(s, slab_nid(slab), slab->objects);
2516 if (slab->inuse == slab->objects) {
2552 if (slab->inuse == slab->objects)
2557 inc_slabs_node(s, nid, slab->objects);
2651 * returns node local objects. If the ratio is higher, then kmalloc()
2652 * may return off node objects because partial slabs are obtained
2660 * with available objects.
2811 * Stage one: Count the objects on cpu's freelist as free_delta and
2821 * 'freelist_iter' is already corrupted. So isolate all objects
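Stage one of deactivate_slab() (comment at line 2811) walks the per-cpu freelist, counting entries into free_delta and remembering the tail for the later splice; the corruption check mentioned at line 2821 breaks out early so a damaged suffix is simply left isolated. Roughly:

	void *nextfree, *freelist_tail = NULL;
	void *freelist_iter = freelist;
	int free_delta = 0;

	while (freelist_iter) {
		nextfree = get_freepointer(s, freelist_iter);

		/* A bogus next pointer means 'freelist_iter' itself may already
		 * be corrupted: stop here and do not splice the rest back.
		 */
		if (freelist_corrupted(s, slab, &freelist_iter, nextfree))
			break;

		freelist_tail = freelist_iter;
		free_delta++;
		freelist_iter = nextfree;
	}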
3138 * Check if the objects in a per cpu structure fit numa
3153 return slab->objects - slab->inuse;
3176 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
3206 slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
3319 new.inuse = slab->objects;
3346 new.inuse = slab->objects;
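The two new.inuse = slab->objects assignments at lines 3319 and 3346 come from the paths that detach a slab's entire freelist for one CPU: once the freelist pointer is taken, every object is either allocated or parked on a per-cpu list, so inuse is raised to objects in the same atomic update. A heavily reduced sketch of the pattern; the retry helper's name varies across versions (cmpxchg_double_slab() in older kernels, slab_update_freelist() in newer ones) and the surrounding locking is omitted:

	struct slab new;
	unsigned long counters;
	void *freelist;

	do {
		freelist = slab->freelist;
		counters = slab->counters;

		new.counters = counters;
		new.inuse = slab->objects;	/* everything now accounted as taken */
	} while (!slab_update_freelist(s, slab,
				       freelist, counters,	/* expected old values */
				       NULL, new.counters,	/* freelist detached */
				       "get_freelist"));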
3361 * Processing is still very fast if new objects have been freed to the
3448 * freelist is pointing to the list of objects to be used.
3449 * slab is pointing to the slab from which the objects are obtained.
3558 slab->inuse = slab->objects;
3561 inc_slabs_node(s, slab_nid(slab), slab->objects);
4070 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
4082 * Slow path handling. This may still be called frequently since objects
4213 * Bulk free of a freelist with several objects (all pointing to the
4214 * same slab) is possible by specifying head and tail ptr, plus objects
4297 * to remove objects whose reuse must be delayed.
4409 * This function progressively scans the array with free objects (with
4410 * a limited look ahead) and extracts objects belonging to the same
4412 * slab/objects. This can happen without any need for
4413 * synchronization, because the objects are owned by the running process.
4414 * The freelist is built up as a singly linked list in the objects.
4432 /* Handle kalloc'ed objects */
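The detached-freelist machinery described at lines 4409-4432 is what makes kmem_cache_free_bulk() cheap: the caller hands over an array of pointers and runs of objects from the same slab are spliced back with a single synchronized operation. A minimal usage sketch (object type, function name and batch size are made up for illustration):

struct my_item { int payload; };		/* hypothetical object type */

static int bulk_demo(struct kmem_cache *item_cache)
{
	void *objs[16];
	int got;

	got = kmem_cache_alloc_bulk(item_cache, GFP_KERNEL,
				    ARRAY_SIZE(objs), objs);
	if (!got)
		return -ENOMEM;

	/* ... use the objects ... */

	/* All pointers go back in one call; objects sharing a slab are
	 * chained into a detached freelist before any list lock is taken.
	 */
	kmem_cache_free_bulk(item_cache, got, objs);
	return 0;
}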
4480 * Internal bulk free of objects that were not initialised by the post alloc
4529 * Drain objects in the per cpu slab, while disabling local
4682 * order 0 does not cause fragmentation in the page allocator. Larger objects
4688 * number of objects is in one slab. Otherwise we may generate too much
4693 * number of objects in a slab as critical. If we reach slab_max_order then
4697 * Higher order allocations also allow the placement of more objects in a
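The order/object tradeoff discussed above is encoded in struct kmem_cache_order_objects, which packs the page order and the resulting object count into one word; oo_objects() (used for the estimates at lines 6255/6265) masks out the low bits. A sketch consistent with the 15-bit objects field noted at line 305 (OO_SHIFT is 16 in slub.c):

struct kmem_cache_order_objects {
	unsigned int x;				/* (order << OO_SHIFT) | object count */
};

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
	/* How many objects of 'size' fit into 2^order pages. */
	return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
						      unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}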
4863 inc_slabs_node(kmem_cache_node, node, slab->objects);
4923 * cpu_partial determined the maximum number of objects kept in the
4932 * of objects, even though we now limit the maximum number of pages, see
5005 * destructor, are poisoning the objects, or are
5043 * overwrites from earlier objects rather than let
5058 * offset 0. In order to align the objects we have to simply size
5083 * Determine the number of objects per slab
5162 for_each_object(p, s, addr, slab->objects) {
5220 /* Attempt to free all objects */
5254 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
5332 * Rejects incorrectly sized objects and objects that are to be copied
5388 * being allocated from last, increasing the chance that the last objects
5417 int free = slab->objects - slab->inuse;
5425 if (free == slab->objects) {
5429 dec_slabs_node(s, node, slab->objects);
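The shrink path at lines 5417-5429 classifies each partial slab by its free count: a completely free slab (free == slab->objects) is unlinked and its objects subtracted from the node totals, while slabs with only a few free objects are promoted so they fill up and leave the partial list. A condensed sketch of the per-slab decision; the real __kmem_cache_do_shrink() runs this under n->list_lock, initializes the promote lists first and splices them back afterwards:

	struct slab *slab, *t;
	LIST_HEAD(discard);
	struct list_head promote[SHRINK_PROMOTE_MAX];	/* indexed by free count - 1 */

	list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
		int free = slab->objects - slab->inuse;

		if (free == slab->objects) {
			/* Nothing allocated anymore: discard the whole slab. */
			list_move(&slab->slab_list, &discard);
			n->nr_partial--;
			dec_slabs_node(s, node, slab->objects);
		} else if (free <= SHRINK_PROMOTE_MAX) {
			/* Nearly full: move toward the head of the partial list. */
			list_move(&slab->slab_list, promote + free - 1);
		}
	}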
5734 return slab->objects;
5750 for_each_object(p, s, addr, slab->objects) {
5819 * Generate lists of code addresses where slabcache objects are allocated
5975 for_each_object(p, s, addr, slab->objects)
5989 SL_OBJECTS, /* Determine allocated objects not slabs */
6027 x = slab->objects;
6189 unsigned int objects;
6192 err = kstrtouint(buf, 10, &objects);
6195 if (objects && !kmem_cache_has_cpu_partial(s))
6198 slub_set_cpu_partial(s, objects);
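Lines 6189-6198 are the sysfs store handler behind /sys/kernel/slab/<cache>/cpu_partial. Restored to a full handler it reads roughly as below; the flush_all() call and the exact error handling are reconstructed from memory of the upstream code, so treat the details as approximate:

static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
				 size_t length)
{
	unsigned int objects;
	int err;

	err = kstrtouint(buf, 10, &objects);
	if (err)
		return err;
	/* Caches without a per-cpu partial list only accept 0 here. */
	if (objects && !kmem_cache_has_cpu_partial(s))
		return -EINVAL;

	slub_set_cpu_partial(s, objects);
	flush_all(s);
	return length;
}

For example, writing 0 to /sys/kernel/slab/kmalloc-64/cpu_partial stops that cache from keeping partial slabs per CPU.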
6238 int objects = 0;
6255 objects = (slabs * oo_objects(s->oo)) / 2;
6256 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
6265 objects = (slabs * oo_objects(s->oo)) / 2;
6267 cpu, objects, slabs);
6328 SLAB_ATTR_RO(objects);