Lines Matching refs:freelist

73  *	A. slab->freelist	-> List of free objects in a slab
84 * processors may put objects onto the freelist but the processor that
86 * slab's freelist.
129 * taken but it still utilizes the freelist for the common operations.
176 * freelist that allows lockless access to
177 * free objects in addition to the regular freelist
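
The comment block above (lines 73-177) describes the two freelists SLUB works with: the slab's own list of free objects and the per-CPU lockless list. As background for the references below, here is a minimal userspace sketch of such an intrusive freelist; the names (freelist_push/freelist_pop) are invented and this is not the kernel implementation.

    #include <stdio.h>

    /*
     * Minimal sketch of an intrusive freelist: each free object stores the
     * pointer to the next free object inside its own memory, so the list
     * needs no extra storage.  Single-threaded, invented names.
     */
    static void *freelist;                      /* head: first free object */

    static void freelist_push(void *object)
    {
        *(void **)object = freelist;            /* store "next" inside the object */
        freelist = object;
    }

    static void *freelist_pop(void)
    {
        void *object = freelist;

        if (object)
            freelist = *(void **)object;
        return object;
    }

    int main(void)
    {
        static _Alignas(void *) char slab[4][64];   /* pretend slab with 4 objects */
        void *p;

        for (int i = 0; i < 4; i++)
            freelist_push(slab[i]);
        while ((p = freelist_pop()))
            printf("allocated %p\n", p);
        return 0;
    }
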
359 ALLOC_REFILL, /* Refill cpu slab from slab freelist */
371 CMPXCHG_DOUBLE_FAIL, /* Failures of slab freelist update */
381 * When changing the layout, make sure freelist and tid are still compatible
387 void **freelist; /* Pointer to next available object */
469 * freeptr_t represents a SLUB freelist pointer, which might be encoded
475 * Returns freelist pointer (ptr). With hardening, this is obfuscated
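
Lines 469-475 refer to the hardened encoding of the stored next pointer (freeptr_t). The sketch below models only the general idea, XORing the pointer with a per-cache secret and with the address it is stored at; the kernel's actual transform differs in detail, and all names here are invented.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Simplified model of freelist pointer hardening: the "next" value kept
     * inside a free object is XORed with a per-cache secret and with the
     * address it is stored at, so leaked slab memory does not reveal usable
     * pointers and a stray overwrite decodes to a wild address.
     */
    static const uintptr_t cache_secret = (uintptr_t)0xbadc0ffee0ddf00dull;

    static uintptr_t encode_freeptr(void *next, void *stored_at)
    {
        return (uintptr_t)next ^ cache_secret ^ (uintptr_t)stored_at;
    }

    static void *decode_freeptr(uintptr_t encoded, void *stored_at)
    {
        return (void *)(encoded ^ cache_secret ^ (uintptr_t)stored_at);
    }

    int main(void)
    {
        static char object[64], next[64];
        uintptr_t raw = encode_freeptr(next, object);

        printf("encoded: %#jx\n", (uintmax_t)raw);
        printf("decoded: %p (next is %p)\n", decode_freeptr(raw, object), (void *)next);
        return 0;
    }
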
526 * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
639 freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
640 freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };
656 if (slab->freelist == freelist_old &&
658 slab->freelist = freelist_new;
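
Lines 526-658 cover the paired update of the freelist head and its counters word, which defeats ABA, plus the locked fallback used when the hardware cmpxchg-double is not available. A simplified userspace sketch of that fallback, with invented names:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    /*
     * Sketch of the locked fallback: the freelist head and the counters word
     * are compared and replaced together under a lock, giving the same
     * all-or-nothing semantics as the cmpxchg-double fast path.
     */
    struct locked_slab {
        void *freelist;
        unsigned long counters;
        pthread_mutex_t lock;
    };

    static bool slab_update_freelist(struct locked_slab *slab,
                                     void *freelist_old, unsigned long counters_old,
                                     void *freelist_new, unsigned long counters_new)
    {
        bool replaced = false;

        pthread_mutex_lock(&slab->lock);
        if (slab->freelist == freelist_old && slab->counters == counters_old) {
            slab->freelist = freelist_new;      /* both words change as one unit */
            slab->counters = counters_new;
            replaced = true;
        }
        pthread_mutex_unlock(&slab->lock);
        return replaced;
    }

    int main(void)
    {
        struct locked_slab slab = { .lock = PTHREAD_MUTEX_INITIALIZER };
        static char object[64];

        return slab_update_freelist(&slab, NULL, 0, object, 1) ? 0 : 1;
    }
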
747 for (p = slab->freelist; p; p = get_freepointer(s, p))
964 slab, slab->objects, slab->inuse, slab->freelist,
1101 void **freelist, void *nextfree)
1104 !check_valid_pointer(s, slab, nextfree) && freelist) {
1105 object_err(s, slab, *freelist, "Freechain corrupt");
1106 *freelist = NULL;
1400 * Determine if a certain object in a slab is on the freelist. Must hold the
1410 fp = slab->freelist;
1421 slab->freelist = NULL;
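
The check referenced at line 1400 runs exactly when corruption is suspected, so it cannot trust the list it walks. Below is a simplified sketch of a bounded membership walk; the kernel version additionally repairs the chain when it hits an invalid pointer. Names are invented.

    #include <stdbool.h>
    #include <stddef.h>

    /*
     * Defensive freelist walk: look for `search` on the list, but never
     * follow more links than the slab can hold objects, so a corrupted
     * (looping) chain still terminates.  Uses the store-next-in-the-object
     * convention from the earlier sketches.
     */
    static bool object_on_freelist(void *freelist, void *search, unsigned int max_objects)
    {
        void *fp = freelist;
        unsigned int nr = 0;

        while (fp && nr <= max_objects) {
            if (fp == search)
                return true;
            fp = *(void **)fp;          /* next free object */
            nr++;
        }
        return false;
    }

    int main(void)
    {
        static _Alignas(void *) char objs[3][64];
        void *head = NULL;

        for (int i = 0; i < 3; i++) {
            *(void **)objs[i] = head;
            head = objs[i];
        }
        return object_on_freelist(head, objs[1], 3) ? 0 : 1;
    }
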
1460 slab->freelist);
1573 slab->freelist = NULL;
1861 void **freelist, void *nextfree)
2124 /* Head and tail of the reconstructed freelist */
2136 /* Move object to the new freelist */
2143 * Adjust the reconstructed freelist depth
2217 /* Initialize each random sequence freelist per cache */
2230 /* Get the next entry on the pre-computed freelist randomized */
2252 /* Shuffle the single linked freelist based on a random pre-computed sequence */
2269 /* First entry is used as the base of the freelist */
2272 slab->freelist = cur;
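
Lines 2217-2272 cover freelist randomization: a random sequence of slot indices is pre-computed per cache and a new slab's objects are linked in that shuffled order. The userspace model below shows the same idea, using rand() in place of the kernel's pre-computed sequence.

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Model of freelist randomization: link a fresh slab's objects in a
     * random permutation of slot order instead of address order, so
     * consecutive allocations do not return adjacent addresses.
     */
    #define NR_OBJECTS  8
    #define OBJECT_SIZE 64

    int main(void)
    {
        static _Alignas(void *) char slab[NR_OBJECTS][OBJECT_SIZE];
        unsigned int seq[NR_OBJECTS];
        void *freelist = NULL;

        /* Pre-compute a random sequence of slot indices (Fisher-Yates). */
        for (unsigned int i = 0; i < NR_OBJECTS; i++)
            seq[i] = i;
        for (unsigned int i = NR_OBJECTS - 1; i > 0; i--) {
            unsigned int j = rand() % (i + 1);
            unsigned int tmp = seq[i];

            seq[i] = seq[j];
            seq[j] = tmp;
        }

        /* Build the freelist following the shuffled sequence. */
        for (unsigned int i = 0; i < NR_OBJECTS; i++) {
            void *object = slab[seq[i]];

            *(void **)object = freelist;
            freelist = object;
        }

        for (void *p = freelist; p; p = *(void **)p)
            printf("free slot %td\n", ((char *)p - &slab[0][0]) / OBJECT_SIZE);
        return 0;
    }
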
2371 slab->freelist = start;
2502 object = slab->freelist;
2503 slab->freelist = get_freepointer(s, object);
2521 * allocated slab. Allocate a single object instead of whole freelist
2533 object = slab->freelist;
2534 slab->freelist = get_freepointer(s, object);
2784 * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
2790 void *freelist)
2800 if (slab->freelist) {
2806 * Stage one: Count the objects on cpu's freelist as free_delta and
2810 freelist_iter = freelist;
2830 * freelist to the head of slab's freelist.
2833 old.freelist = READ_ONCE(slab->freelist);
2842 set_freepointer(s, freelist_tail, old.freelist);
2843 new.freelist = freelist;
2845 new.freelist = old.freelist;
2848 old.freelist, old.counters,
2849 new.freelist, new.counters,
2859 } else if (new.freelist) {
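
Lines 2784-2859 describe deactivation: whatever is left on the CPU's local freelist is spliced back in front of the slab's own list. A single-threaded sketch of that splice (the kernel does the final swap with the paired update shown earlier and also adjusts the inuse count):

    #include <stddef.h>

    /*
     * Splice done when a cpu slab is deactivated: the remaining per-CPU
     * objects are put in front of the slab's own freelist by pointing the
     * per-CPU tail at the old slab head.  Invented names.
     */
    static void *merge_freelists(void *cpu_freelist, void *slab_freelist)
    {
        void *tail = cpu_freelist;

        if (!cpu_freelist)
            return slab_freelist;               /* nothing left on the cpu list */

        while (*(void **)tail)                  /* find the last per-CPU object */
            tail = *(void **)tail;
        *(void **)tail = slab_freelist;         /* chain the slab list behind it */
        return cpu_freelist;                    /* head of the merged list */
    }

    int main(void)
    {
        static _Alignas(void *) char a[64], b[64];

        *(void **)a = NULL;                     /* cpu list: just object a */
        *(void **)b = NULL;                     /* slab list: just object b */
        return merge_freelists(a, b) == (void *)a ? 0 : 1;
    }
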
2998 void *freelist;
3003 freelist = c->freelist;
3006 c->freelist = NULL;
3012 deactivate_slab(s, slab, freelist);
3020 void *freelist = c->freelist;
3024 c->freelist = NULL;
3028 deactivate_slab(s, slab, freelist);
3156 /* Supports checking bulk free of a constructed freelist */
3192 /* Reached end of constructed freelist yet? */
3285 freelist_aba_t old = { .freelist = freelist_old, .counter = tid };
3286 freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) };
3293 * Check the slab->freelist and either transfer the freelist to the
3294 * per cpu freelist or deactivate the slab.
3304 void *freelist;
3309 freelist = slab->freelist;
3315 new.frozen = freelist != NULL;
3318 freelist, counters,
3322 return freelist;
3326 * Freeze the partial slab and return the pointer to the freelist.
3332 void *freelist;
3335 freelist = slab->freelist;
3345 freelist, counters,
3349 return freelist;
3353 * Slow path. The lockless freelist is empty or we need to perform
3357 * regular freelist. In that case we simply take over the regular freelist
3358 * as the lockless freelist and zap the regular freelist.
3361 * first element of the freelist as the object to allocate now and move the
3362 * rest of the freelist to the lockless freelist.
3374 void *freelist;
3422 freelist = c->freelist;
3423 if (freelist)
3426 freelist = get_freelist(s, slab);
3428 if (!freelist) {
3443 * freelist is pointing to the list of objects to be used.
3448 c->freelist = get_freepointer(s, freelist);
3451 return freelist;
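
Lines 3353-3451 describe the central move of the slow path: the slab's regular freelist is taken over as the per-CPU lockless freelist, the slab's copy is zapped, and the first object is handed to the caller. A single-threaded sketch with invented names (the real code performs the hand-over under the locking and tid machinery referenced above):

    #include <stddef.h>

    /*
     * Slow-path take-over: give the slab's whole freelist to the per-CPU
     * cache, zap the slab's copy so the slab appears fully in use, and
     * return the first object.
     */
    struct toy_cpu  { void *freelist; };
    struct toy_slab { void *freelist; };

    static void *takeover_freelist(struct toy_cpu *c, struct toy_slab *slab)
    {
        void *object = slab->freelist;

        if (!object)
            return NULL;                        /* nothing free in this slab */
        c->freelist = *(void **)object;         /* rest becomes the lockless list */
        slab->freelist = NULL;                  /* zap the regular freelist */
        return object;                          /* first element is allocated now */
    }

    int main(void)
    {
        static _Alignas(void *) char x[64], y[64];
        struct toy_cpu c = { NULL };
        struct toy_slab slab = { x };

        *(void **)x = y;
        *(void **)y = NULL;
        return (takeover_freelist(&c, &slab) == (void *)x && c.freelist == (void *)y) ? 0 : 1;
    }
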
3460 freelist = c->freelist;
3462 c->freelist = NULL;
3465 deactivate_slab(s, slab, freelist);
3488 freelist = get_freelist(s, slab);
3489 VM_BUG_ON(!freelist);
3508 freelist = pc.object;
3515 set_track(s, freelist, TRACK_ALLOC, addr);
3517 return freelist;
3520 freelist = freeze_slab(s, slab);
3536 freelist = alloc_single_from_new_slab(s, slab, orig_size);
3538 if (unlikely(!freelist))
3542 set_track(s, freelist, TRACK_ALLOC, addr);
3544 return freelist;
3551 freelist = slab->freelist;
3552 slab->freelist = NULL;
3560 * For !pfmemalloc_match() case we don't load freelist so that
3563 deactivate_slab(s, slab, get_freepointer(s, freelist));
3564 return freelist;
3571 void *flush_freelist = c->freelist;
3575 c->freelist = NULL;
3658 object = c->freelist;
3674 * 2. Verify that tid and freelist have not been changed
3675 * 3. If they were not changed replace tid and freelist
3720 * zeroing out freelist pointer.
3815 * The fastpath works by first checking if the lockless freelist can be used.
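
Lines 3658-3815 refer to the allocation fastpath: the per-CPU freelist head and a transaction id (tid) are read together and replaced together, so any operation that slips in between bumps the tid and forces a retry. The model below substitutes a C11 atomic pair for the kernel's per-CPU cmpxchg; all names are invented, and with gcc the 16-byte atomic may need linking against -latomic.

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    /*
     * Model of the fastpath transaction: freelist head and tid are loaded
     * and exchanged as a pair, so an interleaved alloc/free changes the tid
     * and makes the compare-and-exchange fail, retrying the pop.
     */
    struct cpu_pair {
        void *freelist;
        unsigned long tid;
    };

    static _Atomic struct cpu_pair cpu_state;

    static void *fastpath_alloc(void)
    {
        struct cpu_pair old, new;

        do {
            old = atomic_load(&cpu_state);
            if (!old.freelist)
                return NULL;                        /* kernel would take the slow path */
            new.freelist = *(void **)old.freelist;  /* next free object */
            new.tid = old.tid + 1;
        } while (!atomic_compare_exchange_weak(&cpu_state, &old, new));

        return old.freelist;
    }

    int main(void)
    {
        static _Alignas(void *) char objs[4][64];
        struct cpu_pair init = { NULL, 0 };
        void *p;

        for (int i = 0; i < 4; i++) {
            *(void **)objs[i] = init.freelist;
            init.freelist = objs[i];
        }
        atomic_store(&cpu_state, init);

        while ((p = fastpath_alloc()))
            printf("allocated %p\n", p);
        return 0;
    }
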
4031 void *prior = slab->freelist;
4036 slab->freelist = head;
4108 prior = slab->freelist;
4207 * Bulk free of a freelist with several objects (all pointing to the
4217 void **freelist;
4238 freelist = READ_ONCE(c->freelist);
4240 set_freepointer(s, tail, freelist);
4242 if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) {
4255 freelist = c->freelist;
4257 set_freepointer(s, tail, freelist);
4258 c->freelist = head;
4290 * With KASAN enabled slab_free_freelist_hook modifies the freelist
4397 void *freelist;
4405 * slab. It builds a detached freelist directly within the given
4408 * The freelist is built up as a singly linked list in the objects.
4409 * The idea is that this detached freelist can then be bulk
4410 * transferred to the real freelist(s), but only requiring a single
4440 /* Start new detached freelist */
4442 df->freelist = object;
4455 /* Opportunistically build freelist */
4456 set_freepointer(df->s, object, df->freelist);
4457 df->freelist = object;
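
Lines 4397-4457 describe the detached freelist used by bulk free: the objects handed in are first chained through their own memory, with head, tail and count remembered, so the whole chain can later be moved to the real freelist in one step. A simplified sketch that only handles objects from a single slab:

    #include <stddef.h>
    #include <stdio.h>

    /*
     * Detached freelist sketch: chain bulk-freed objects through their own
     * memory, remembering head, tail and count so the chain can be spliced
     * into the real freelist with a single update.  The kernel additionally
     * groups the objects by the slab they belong to.
     */
    struct detached_freelist {
        void *freelist;         /* head of the chain */
        void *tail;             /* last object, needed for the splice */
        unsigned int cnt;
    };

    static void detached_add(struct detached_freelist *df, void *object)
    {
        if (!df->freelist)
            df->tail = object;                  /* first object stays the tail */
        *(void **)object = df->freelist;        /* link through the object itself */
        df->freelist = object;
        df->cnt++;
    }

    int main(void)
    {
        static _Alignas(void *) char objs[3][64];
        struct detached_freelist df = { NULL, NULL, 0 };

        for (int i = 0; i < 3; i++)
            detached_add(&df, objs[i]);
        printf("detached %u objects, head %p, tail %p\n", df.cnt, df.freelist, df.tail);
        return 0;
    }
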
4489 do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
4507 slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size],
4538 object = c->freelist;
4541 * We may have removed an object from c->freelist using
4553 * of re-populating per CPU c->freelist
4567 c->freelist = get_freepointer(s, object);
4846 n = slab->freelist;
4853 slab->freelist = get_freepointer(kmem_cache_node, n);
5011 * Store freelist pointer near middle of object to keep
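
Line 5011 is about where inside the object the next-free pointer is kept: placing it away from the object's edges makes it less likely that a small overflow from a neighbouring object corrupts the chain. A sketch of storing and reading the free pointer at a fixed offset near the middle (invented helpers, simplified offset calculation):

    #include <stddef.h>
    #include <stdio.h>

    /*
     * Keep the free pointer at an aligned offset near the middle of the
     * object instead of at its very start, so small overflows off the
     * previous object are less likely to hit it.
     */
    #define OBJECT_SIZE     96
    #define FREEPTR_OFFSET  (((OBJECT_SIZE / 2) / sizeof(void *)) * sizeof(void *))

    static void set_freepointer(void *object, void *next)
    {
        *(void **)((char *)object + FREEPTR_OFFSET) = next;
    }

    static void *get_freepointer(void *object)
    {
        return *(void **)((char *)object + FREEPTR_OFFSET);
    }

    int main(void)
    {
        static _Alignas(void *) char a[OBJECT_SIZE], b[OBJECT_SIZE];

        set_freepointer(a, b);
        set_freepointer(b, NULL);
        printf("offset %zu: a -> %p (b is %p)\n",
               (size_t)FREEPTR_OFFSET, get_freepointer(a), (void *)b);
        return 0;
    }
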
5127 /* Initialize the pre-computed randomized freelist if slab is up */
5742 /* Now we know that a valid freelist exists */