Lines Matching defs:object

156 void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
158 kasan_unpoison(object, cache->object_size, false);
161 void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
163 kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
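
The round_up() in the poison call above reflects that KASAN's shadow memory tracks validity per granule, not per byte, so poisoning has to cover whole granules. Below is a minimal standalone sketch of that rounding; the 8-byte granule and the round_up() macro are re-declared here as assumptions, the real definitions live in the kernel's KASAN headers.

/*
 * Standalone sketch of the granule rounding used above. The granule size
 * and round_up() are assumptions for illustration only.
 */
#include <stdio.h>

#define KASAN_GRANULE_SIZE 8	/* assumption: granule used by generic KASAN */
#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int object_size = 52;	/* hypothetical cache->object_size */

	/* Poison/unpoison work on whole granules, so 52 bytes round up to 56. */
	printf("poison %u bytes for a %u-byte object\n",
	       round_up(object_size, KASAN_GRANULE_SIZE), object_size);
	return 0;
}
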
168 * This function assigns a tag to an object considering the following:
170 * object somewhere (e.g. in the object itself). We preassign a tag for
171 * each object in caches with constructors during slab creation and reuse
172 * the same tag each time a particular object is allocated.
178 const void *object, bool init)
185 * set, assign a tag when the object is being allocated (init == false).
195 return init ? kasan_random_tag() : get_tag(object);
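
The truncated comment above describes the tag policy: caches whose objects may hold pointers to themselves (those with constructors, and similarly RCU-typesafe caches) get one tag per object at slab creation and reuse it on every allocation, while other caches get a fresh random tag each time. The following is a hedged standalone model of that decision; has_ctor, stored_tag and assign_tag_model() are illustrative stand-ins for cache->ctor, get_tag(object) and assign_tag(), not kernel APIs.

/*
 * Model of the tag policy: constructor caches keep one tag per object,
 * chosen at slab creation and reused; other caches tag at allocation.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint8_t random_tag(void)
{
	return (uint8_t)(rand() & 0xff);	/* stand-in for kasan_random_tag() */
}

static uint8_t assign_tag_model(int has_ctor, uint8_t stored_tag, int init)
{
	if (!has_ctor)
		/* No constructor: tag when the object is allocated (init == 0). */
		return init ? 0xff : random_tag();

	/*
	 * Constructor present: the tag was picked at slab creation
	 * (init == 1) and the stored tag is reused on later allocations.
	 */
	return init ? random_tag() : stored_tag;
}

int main(void)
{
	uint8_t stored = assign_tag_model(1, 0, 1);	/* slab creation */

	printf("reused tag: %#x\n", assign_tag_model(1, stored, 0));
	printf("fresh tag:  %#x\n", assign_tag_model(0, 0, 0));
	return 0;
}
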
199 const void *object)
201 /* Initialize per-object metadata if it is present. */
203 kasan_init_object_meta(cache, object);
206 object = set_tag(object, assign_tag(cache, object, true));
208 return (void *)object;
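
The set_tag() call above matters for the tag-based KASAN modes, where the tag is carried in the otherwise-unused top byte of the kernel pointer. Here is a userspace sketch of that encoding; the top-byte placement (a shift of 56 on a 64-bit pointer) is stated as an assumption rather than copied from the kernel headers, and the tagged pointer is never dereferenced.

/*
 * Sketch of top-byte pointer tagging in the spirit of set_tag()/get_tag().
 * Assumes 64-bit pointers with a spare top byte; purely illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT 56

static void *set_tag_model(void *ptr, uint8_t tag)
{
	uintptr_t p = (uintptr_t)ptr;

	p &= ~((uintptr_t)0xff << TAG_SHIFT);	/* clear the old tag */
	p |= (uintptr_t)tag << TAG_SHIFT;	/* store the new tag */
	return (void *)p;
}

static uint8_t get_tag_model(const void *ptr)
{
	return (uint8_t)((uintptr_t)ptr >> TAG_SHIFT);
}

int main(void)
{
	int x;
	void *tagged = set_tag_model(&x, 0xab);

	printf("tag read back: %#x\n", get_tag_model(tagged));
	return 0;
}
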
211 static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
219 tagged_object = object;
220 object = kasan_reset_tag(object);
222 if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
236 kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
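
The nearest_obj() comparison above is how an invalid free is detected: the freed pointer is snapped to the closest object boundary in its slab, and a mismatch means the caller freed an interior pointer. Below is a simplified standalone model over a flat array of fixed-size objects; real slabs carry per-slab metadata and nearest_obj() is more involved, so the helpers here are illustrative only.

/*
 * Simplified model of the "freed pointer must sit on an object boundary"
 * check: snap the pointer to the nearest object start and compare.
 */
#include <stdint.h>
#include <stdio.h>

#define OBJ_SIZE 64
static char slab[8 * OBJ_SIZE];

static void *nearest_obj_model(void *ptr)
{
	uintptr_t off = (uintptr_t)ptr - (uintptr_t)slab;

	return slab + (off / OBJ_SIZE) * OBJ_SIZE;
}

static int is_invalid_free(void *ptr)
{
	return nearest_obj_model(ptr) != ptr;
}

int main(void)
{
	printf("free(obj 2 start): invalid=%d\n", is_invalid_free(slab + 2 * OBJ_SIZE));
	printf("free(obj 2 + 8):   invalid=%d\n", is_invalid_free(slab + 2 * OBJ_SIZE + 8));
	return 0;
}
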
245 bool __kasan_slab_free(struct kmem_cache *cache, void *object,
248 if (is_kfence_address(object))
252 * If the object is buggy, do not let slab put the object onto the
253 * freelist. The object will thus never be allocated again and its
256 if (poison_slab_object(cache, object, ip, init))
260 * If the object is put into quarantine, do not let slab put the object
261 * onto the freelist for now. The object's metadata is kept until the
262 * object gets evicted from quarantine.
264 if (kasan_quarantine_put(cache, object))
268 * Note: Keep per-object metadata to allow KASAN print stack traces for
272 /* Let slab put the object onto the freelist. */
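
The return values in these __kasan_slab_free() fragments encode one decision: may slab reuse the object now? A buggy free and a quarantined object both answer no (return true); everything else hands the object back to the freelist. A small control-flow sketch follows, with stand-in predicates for poison_slab_object() and kasan_quarantine_put().

/*
 * Control-flow sketch of the free path above; "true" means "keep the
 * object off the freelist", exactly as in the fragments.
 */
#include <stdbool.h>
#include <stdio.h>

static bool slab_free_model(bool free_is_buggy, bool quarantined)
{
	if (free_is_buggy)
		return true;	/* never hand a buggy object back to slab */
	if (quarantined)
		return true;	/* held until evicted from quarantine */
	return false;		/* slab may put it on the freelist now */
}

int main(void)
{
	printf("buggy free kept:      %d\n", slab_free_model(true, false));
	printf("quarantined kept:     %d\n", slab_free_model(false, true));
	printf("normal free released: %d\n", slab_free_model(false, false));
	return 0;
}
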
298 /* The object will be poisoned by kasan_poison_pages(). */
301 static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
305 * Unpoison the whole object. For kmalloc() allocations,
308 kasan_unpoison(object, cache->object_size, init);
312 kasan_save_alloc_info(cache, object, flags);
316 void *object, gfp_t flags, bool init)
324 if (unlikely(object == NULL))
327 if (is_kfence_address(object))
328 return (void *)object;
334 tag = assign_tag(cache, object, false);
335 tagged_object = set_tag(object, tag);
337 /* Unpoison the object and save alloc info for non-kmalloc() allocations. */
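
Read together, the unpoison_slab_object() and __kasan_slab_alloc() fragments describe the allocation path: skip NULL and KFENCE objects, pick and embed a tag, unpoison the whole object_size, and return the tagged pointer. The sketch below is a hedged flow model with stand-in helpers; the tag placement reuses the earlier top-byte assumption and none of these names are kernel APIs.

/*
 * Flow sketch of the allocation path in the fragments above. Helpers are
 * stand-ins for is_kfence_address(), assign_tag()/set_tag() and
 * kasan_unpoison().
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int is_kfence(const void *p)
{
	(void)p;
	return 0;	/* stand-in for is_kfence_address() */
}

static void *tag_ptr(void *p, uint8_t tag)
{
	uintptr_t v = (uintptr_t)p;

	v &= ~((uintptr_t)0xff << 56);
	v |= (uintptr_t)tag << 56;
	return (void *)v;	/* illustrative only, never dereferenced */
}

static void *slab_alloc_model(void *object, size_t object_size)
{
	void *tagged;

	if (object == NULL)
		return NULL;		/* nothing to track */
	if (is_kfence(object))
		return object;		/* KFENCE manages its own redzones */

	tagged = tag_ptr(object, 0xab);			/* assign a tag */
	printf("unpoison %zu bytes\n", object_size);	/* whole object becomes accessible */
	return tagged;
}

int main(void)
{
	char obj[64];

	printf("tagged pointer: %p\n", slab_alloc_model(obj, sizeof(obj)));
	return 0;
}
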
344 const void *object, size_t size, gfp_t flags)
351 * Partially poison the last object granule to cover the unaligned
355 kasan_poison_last_granule((void *)object, size);
358 redzone_start = round_up((unsigned long)(object + size),
360 redzone_end = round_up((unsigned long)(object + cache->object_size),
370 kasan_save_alloc_info(cache, (void *)object, flags);
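
The two round_up() calls above bound the kmalloc redzone: it starts at the granule-aligned end of the requested size and ends at the granule-aligned end of cache->object_size, while the unaligned tail of the last granule is handled separately by kasan_poison_last_granule(). Here is a worked standalone example of that arithmetic; the 8-byte granule, the addresses and the sizes are assumptions.

/*
 * Worked example of the redzone bounds computed above: for a 100-byte
 * kmalloc() served from a 128-byte cache with an assumed 8-byte granule,
 * the redzone covers object offsets [104, 128); offsets 100..103 are the
 * partially poisoned last granule.
 */
#include <stdio.h>

#define GRANULE 8
#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long object = 0x1000;		/* hypothetical object address */
	unsigned long size = 100;		/* requested kmalloc() size */
	unsigned long object_size = 128;	/* hypothetical cache->object_size */

	unsigned long redzone_start = round_up(object + size, GRANULE);
	unsigned long redzone_end = round_up(object + object_size, GRANULE);

	printf("redzone: [%#lx, %#lx) (%lu bytes)\n",
	       redzone_start, redzone_end, redzone_end - redzone_start);
	return 0;
}
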
374 void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
380 if (unlikely(object == NULL))
383 if (is_kfence_address(object))
384 return (void *)object;
386 /* The object has already been unpoisoned by kasan_slab_alloc(). */
387 poison_kmalloc_redzone(cache, object, size, flags);
390 return (void *)object;
402 * Partially poison the last object granule to cover the unaligned
424 /* The object has already been unpoisoned by kasan_unpoison_pages(). */
431 void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
438 if (unlikely(object == ZERO_SIZE_PTR))
439 return (void *)object;
441 if (is_kfence_address(object))
442 return (void *)object;
445 * Unpoison the object's data.
449 kasan_unpoison(object, size, false);
451 slab = virt_to_slab(object);
455 poison_kmalloc_large_redzone(object, size, flags);
457 poison_kmalloc_redzone(slab->slab_cache, object, size, flags);
459 return (void *)object;
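
The krealloc fragments above first unpoison the object's data for the new size and then repoison whatever lies past it, choosing between the slab and page-level redzone helpers depending on whether virt_to_slab() finds a slab. The sketch below is a hedged control-flow model; the helpers are stand-ins for kasan_unpoison(), poison_kmalloc_redzone() and poison_kmalloc_large_redzone(), and "is_slab" stands for the virt_to_slab() outcome.

/*
 * Flow sketch of the krealloc path above: unpoison the data for the new
 * size, then repoison the region beyond it with the variant that matches
 * where the object came from.
 */
#include <stdbool.h>
#include <stdio.h>

static void unpoison(unsigned long size)
{
	printf("unpoison %lu bytes\n", size);		/* kasan_unpoison() stand-in */
}

static void poison_slab_redzone(unsigned long size)
{
	printf("slab redzone past byte %lu\n", size);	/* poison_kmalloc_redzone() stand-in */
}

static void poison_large_redzone(unsigned long size)
{
	printf("page redzone past byte %lu\n", size);	/* poison_kmalloc_large_redzone() stand-in */
}

static void krealloc_model(unsigned long new_size, bool is_slab)
{
	unpoison(new_size);			/* data for the new size becomes accessible */
	if (is_slab)
		poison_slab_redzone(new_size);	/* object belongs to a kmem_cache */
	else
		poison_large_redzone(new_size);	/* page-level (large) allocation */
}

int main(void)
{
	krealloc_model(96, true);
	krealloc_model(5000, false);
	return 0;
}
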
534 /* Unpoison the object and save alloc info for non-kmalloc() allocations. */