Lines Matching refs:meta (mm/kfence/core.c)

250 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
252 unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
258 if (KFENCE_WARN_ON(meta < kfence_metadata ||
259 meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
266 if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
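The checks at 258-266 guard the slot-to-page arithmetic: the pool begins with two guard pages, after which each object owns a data page followed by a guard page, so metadata slot i maps to the page at offset (i + 1) * 2 * PAGE_SIZE. A minimal userspace model of that mapping and of the inverse index math used by addr_to_metadata() (the pool base and the MODEL_* constants and names are made up for the example, not the kernel's):

#include <assert.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE   4096UL
#define MODEL_NUM_OBJECTS 255UL

static unsigned long pool_base = 0x10000000UL;	/* stands in for __kfence_pool */

/* Mirrors line 252: offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2 */
static unsigned long index_to_pageaddr(unsigned long i)
{
	return pool_base + (i + 1) * MODEL_PAGE_SIZE * 2;
}

/* The inverse used by addr_to_metadata(): any address in the two-page slot
 * (data page plus the guard page after it) maps back to the same object. */
static long addr_to_index(unsigned long addr)
{
	long i = (long)((addr - pool_base) / (MODEL_PAGE_SIZE * 2)) - 1;

	return (i < 0 || i >= (long)MODEL_NUM_OBJECTS) ? -1 : i;
}

int main(void)
{
	for (unsigned long i = 0; i < MODEL_NUM_OBJECTS; i++) {
		unsigned long page = index_to_pageaddr(i);

		assert(addr_to_index(page) == (long)i);
		assert(addr_to_index(page + MODEL_PAGE_SIZE) == (long)i);
	}
	printf("index <-> page address round-trips for %lu objects\n",
	       MODEL_NUM_OBJECTS);
	return 0;
}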
277 metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
281 next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
283 lockdep_assert_held(&meta->lock);
305 WRITE_ONCE(meta->state, next);
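A sketch of the bookkeeping behind lines 277-305: the saved stack goes into free_track or alloc_track depending on the state being entered, and the state word is published last. This is a simplified model; the lock assertion and the WRITE_ONCE() semantics are only noted in comments, and the names and sizes are illustrative.

#include <string.h>

enum obj_state { OBJ_UNUSED, OBJ_ALLOCATED, OBJ_FREED };

struct track {
	unsigned long entries[16];
	int num_entries;
};

struct object_meta {
	enum obj_state state;
	struct track alloc_track, free_track;
};

static void update_state(struct object_meta *m, enum obj_state next,
			 const unsigned long *entries, int num)
{
	/* Freed objects record into free_track, everything else into
	 * alloc_track (line 281); the kernel asserts the per-object lock is
	 * held here (line 283). */
	struct track *t = (next == OBJ_FREED) ? &m->free_track : &m->alloc_track;

	if (entries && num > 0) {
		if (num > 16)
			num = 16;
		memcpy(t->entries, entries, num * sizeof(*entries));
		t->num_entries = num;
	}

	/* The kernel publishes the new state with WRITE_ONCE() (line 305). */
	m->state = next;
}

int main(void)
{
	struct object_meta m = { .state = OBJ_UNUSED };
	unsigned long alloc_stack[] = { 0x1111, 0x2222 };
	unsigned long free_stack[]  = { 0x3333 };

	update_state(&m, OBJ_ALLOCATED, alloc_stack, 2);
	update_state(&m, OBJ_FREED, free_stack, 1);

	return (m.alloc_track.num_entries == 2 &&
		m.free_track.num_entries == 1 &&
		m.state == OBJ_FREED) ? 0 : 1;
}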
311 struct kfence_metadata *meta;
319 meta = addr_to_metadata((unsigned long)addr);
320 raw_spin_lock_irqsave(&meta->lock, flags);
321 kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
322 raw_spin_unlock_irqrestore(&meta->lock, flags);
327 static inline void set_canary(const struct kfence_metadata *meta)
329 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
336 for (; addr < meta->addr; addr += sizeof(u64))
339 addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
344 static inline void check_canary(const struct kfence_metadata *meta)
346 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
359 for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
369 for (; addr < meta->addr; addr++) {
375 for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
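The canary logic above fills every byte of the data page outside the object with a pattern derived from the low bits of the byte's address and re-checks it on free. A userspace model of that scheme (pattern value, page size and names are illustrative; the kernel additionally uses the word-at-a-time fast path visible at line 359 before falling back to byte checks):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE 4096UL

/* Per-byte pattern derived from the low bits of the byte's address. */
static uint8_t canary_byte(uintptr_t addr)
{
	return 0xaa ^ (uint8_t)(addr & 0xf);
}

static void set_canary(uint8_t *page, size_t obj_off, size_t obj_size)
{
	for (size_t i = 0; i < MODEL_PAGE; i++) {
		if (i < obj_off || i >= obj_off + obj_size)
			page[i] = canary_byte((uintptr_t)(page + i));
	}
}

/* Returns the offset of the first corrupted canary byte, or -1 if intact. */
static long check_canary(const uint8_t *page, size_t obj_off, size_t obj_size)
{
	for (size_t i = 0; i < MODEL_PAGE; i++) {
		if (i >= obj_off && i < obj_off + obj_size)
			continue;
		if (page[i] != canary_byte((uintptr_t)(page + i)))
			return (long)i;
	}
	return -1;
}

int main(void)
{
	uint8_t *page = aligned_alloc(MODEL_PAGE, MODEL_PAGE);
	size_t off = MODEL_PAGE - 128, size = 100;	/* a right-aligned object */

	set_canary(page, off, size);
	page[off + size + 3] = 0x42;	/* simulate a small out-of-bounds write */
	printf("first corrupted canary byte at page offset %ld\n",
	       check_canary(page, off, size));
	free(page);
	return 0;
}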
394 struct kfence_metadata *meta = NULL;
405 meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
406 list_del_init(&meta->list);
409 if (!meta) {
414 if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
417 * use-after-free, which locked meta->lock, and the reporting
426 list_add_tail(&meta->list, &kfence_freelist);
432 meta->addr = metadata_to_pageaddr(meta);
434 if (meta->state == KFENCE_OBJECT_FREED)
435 kfence_unprotect(meta->addr);
447 meta->addr += PAGE_SIZE - size;
448 meta->addr = ALIGN_DOWN(meta->addr, cache->align);
451 addr = (void *)meta->addr;
454 metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
456 WRITE_ONCE(meta->cache, cache);
457 meta->size = size;
458 meta->alloc_stack_hash = alloc_stack_hash;
459 raw_spin_unlock_irqrestore(&meta->lock, flags);
464 slab = virt_to_slab((void *)meta->addr);
469 set_canary(meta);
482 kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
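Lines 432-448 show the placement choice inside the data page: by default the object sits at the page start, so an underflow trips the preceding guard page immediately; alternatively it is pushed to the end of the page and re-aligned down to cache->align, so an overflow faults on the following guard page right away. A sketch of just that placement step (page size, the alignment macro and the 'right' flag are illustrative; the freelist handling and trylock dance earlier in the function are omitted):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MODEL_PAGE 4096UL

/* align must be a power of two, as cache->align is. */
#define MODEL_ALIGN_DOWN(x, a)	((x) & ~((uintptr_t)(a) - 1))

static uintptr_t place_object(uintptr_t pageaddr, size_t size, size_t align,
			      bool right)
{
	uintptr_t addr = pageaddr;	/* default: left-aligned at the page start */

	if (right) {
		/* Allocate on the "right" side, next to the following guard
		 * page, then re-align down to the cache's alignment
		 * (lines 447-448). */
		addr += MODEL_PAGE - size;
		addr = MODEL_ALIGN_DOWN(addr, align);
	}
	return addr;
}

int main(void)
{
	uintptr_t page = 0x7f0000UL;	/* pretend data page address */
	uintptr_t l = place_object(page, 100, 16, false);
	uintptr_t r = place_object(page, 100, 16, true);

	/* Left placement catches underflows immediately; right placement
	 * catches overflows immediately. */
	return (l == page &&
		r == MODEL_ALIGN_DOWN(page + MODEL_PAGE - 100, 16)) ? 0 : 1;
}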
490 static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
496 raw_spin_lock_irqsave(&meta->lock, flags);
498 if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
501 kfence_report_error((unsigned long)addr, false, NULL, meta,
503 raw_spin_unlock_irqrestore(&meta->lock, flags);
516 if (meta->unprotected_page) {
517 memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
518 kfence_protect(meta->unprotected_page);
519 meta->unprotected_page = 0;
523 metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
524 init = slab_want_init_on_free(meta->cache);
525 raw_spin_unlock_irqrestore(&meta->lock, flags);
527 alloc_covered_add(meta->alloc_stack_hash, -1);
530 check_canary(meta);
538 memzero_explicit(addr, meta->size);
547 KFENCE_WARN_ON(!list_empty(&meta->list));
548 list_add_tail(&meta->list, &kfence_freelist);
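A sketch of the free-path ordering in lines 490-548: the state and address are validated under the per-object lock before anything is torn down, so double-frees and invalid frees are reported rather than corrupting the freelist; only afterwards are canaries checked, the object zeroed and the metadata returned to the freelist. A pthread mutex stands in for the raw spinlock, the report call is just a stderr message, and only the zeroing step is modelled after the unlock:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum obj_state { OBJ_UNUSED, OBJ_ALLOCATED, OBJ_FREED };

struct object_meta {
	pthread_mutex_t lock;
	enum obj_state state;
	uintptr_t addr;
	size_t size;
};

static bool guarded_free(void *addr, struct object_meta *m)
{
	pthread_mutex_lock(&m->lock);
	if (m->state != OBJ_ALLOCATED || m->addr != (uintptr_t)addr) {
		/* Stand-in for kfence_report_error(..., KFENCE_ERROR_INVALID_FREE). */
		fprintf(stderr, "invalid free of %p\n", addr);
		pthread_mutex_unlock(&m->lock);
		return false;
	}
	m->state = OBJ_FREED;			/* metadata_update_state(), line 523 */
	pthread_mutex_unlock(&m->lock);

	/* Outside the lock: check canaries, optionally zero the object,
	 * re-protect the page and return the metadata to the freelist
	 * (lines 530-548); only the zeroing is modelled here. */
	memset(addr, 0, m->size);
	return true;
}

int main(void)
{
	static char buf[32];
	struct object_meta m = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.state = OBJ_ALLOCATED,
		.addr = (uintptr_t)buf,
		.size = sizeof(buf),
	};

	guarded_free(buf, &m);			/* first free succeeds */
	return guarded_free(buf, &m) ? 1 : 0;	/* second is reported as invalid */
}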
561 struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);
563 kfence_guarded_free((void *)meta->addr, meta, false);
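The RCU callback at lines 561-563 recovers its metadata from the embedded rcu_head via container_of(). A minimal illustration of that offset arithmetic (the struct layout and the my_container_of name are made up for the example):

#include <assert.h>
#include <stddef.h>

#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_model { void *next; };		/* stand-in for struct rcu_head */

struct meta_model {
	int state;
	struct rcu_head_model rcu_head;
};

int main(void)
{
	struct meta_model m = { .state = 1 };
	struct rcu_head_model *h = &m.rcu_head;	/* what the RCU callback receives */

	/* Subtracting the member offset recovers the enclosing metadata. */
	assert(my_container_of(h, struct meta_model, rcu_head) == &m);
	return 0;
}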
618 struct kfence_metadata *meta = &kfence_metadata_init[i];
621 INIT_LIST_HEAD(&meta->list);
622 raw_spin_lock_init(&meta->lock);
623 meta->state = KFENCE_OBJECT_UNUSED;
624 meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
625 list_add_tail(&meta->list, &kfence_freelist);
733 struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
736 raw_spin_lock_irqsave(&meta->lock, flags);
737 kfence_print_object(seq, meta);
738 raw_spin_unlock_irqrestore(&meta->lock, flags);
774 struct kfence_metadata *meta = &kfence_metadata[i];
776 if (meta->state == KFENCE_OBJECT_ALLOCATED)
777 check_canary(meta);
983 struct kfence_metadata *meta;
993 meta = &kfence_metadata[i];
1002 if (READ_ONCE(meta->cache) != s ||
1003 READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
1006 raw_spin_lock_irqsave(&meta->lock, flags);
1007 in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
1008 raw_spin_unlock_irqrestore(&meta->lock, flags);
1025 kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
1030 meta = &kfence_metadata[i];
1033 if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
1036 raw_spin_lock_irqsave(&meta->lock, flags);
1037 if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
1038 meta->cache = NULL;
1039 raw_spin_unlock_irqrestore(&meta->lock, flags);
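Lines 1002-1008 (and again 1033-1038) use a check-then-lock pattern: a racy, lock-free read filters out objects that clearly don't belong to the cache being shut down, and only candidates that pass are re-checked under the per-object lock before anything is changed. A sketch with C11 atomics standing in for READ_ONCE() and a pthread mutex for the raw spinlock (the types are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct cache;				/* opaque stand-in for struct kmem_cache */

struct object_meta {
	pthread_mutex_t lock;
	_Atomic(struct cache *) cache;
	_Atomic int state;		/* 0 = unused, 1 = allocated, 2 = freed */
};

static bool object_in_use(struct object_meta *m, struct cache *s)
{
	bool in_use;

	/* Racy, lock-free pre-check; a false "no" is fine because an object
	 * that isn't (or is no longer) owned by this cache needs no action. */
	if (atomic_load_explicit(&m->cache, memory_order_relaxed) != s ||
	    atomic_load_explicit(&m->state, memory_order_relaxed) != 1)
		return false;

	/* Confirm under the per-object lock before acting on the result. */
	pthread_mutex_lock(&m->lock);
	in_use = (atomic_load_explicit(&m->cache, memory_order_relaxed) == s &&
		  atomic_load_explicit(&m->state, memory_order_relaxed) == 1);
	pthread_mutex_unlock(&m->lock);

	return in_use;
}

int main(void)
{
	struct cache *s = (struct cache *)0x1;	/* any distinct cache identity */
	struct object_meta m = { .lock = PTHREAD_MUTEX_INITIALIZER };

	atomic_store(&m.cache, s);
	atomic_store(&m.state, 1);

	return object_in_use(&m, s) ? 0 : 1;
}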
1118 const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1124 return meta ? meta->size : 0;
1129 const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1135 return meta ? (void *)meta->addr : NULL;
1140 struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1143 KFENCE_WARN_ON(meta->objcg);
1148 * objects once it has been freed. meta->cache may be NULL if the cache
1151 if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
1152 call_rcu(&meta->rcu_head, rcu_guarded_free);
1154 kfence_guarded_free(addr, meta, false);
1174 struct kfence_metadata *meta;
1177 meta = addr_to_metadata(addr - PAGE_SIZE);
1178 if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
1179 to_report = meta;
1181 distance = addr - data_race(meta->addr + meta->size);
1184 meta = addr_to_metadata(addr + PAGE_SIZE);
1185 if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
1187 if (!to_report || distance > data_race(meta->addr) - addr)
1188 to_report = meta;
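The fault handler above blames a guard-page access on whichever neighbouring allocated object is closer: the one ending just below the fault or the one starting just above it. A sketch of that selection (plain structs stand in for the metadata; the neighbour lookup itself is the addr_to_metadata() index math shown earlier):

#include <stddef.h>
#include <stdint.h>

struct object_meta {
	int allocated;		/* stand-in for state == KFENCE_OBJECT_ALLOCATED */
	uintptr_t addr;
	size_t size;
};

/* Pick the closer of the object below and the object above the fault. */
static const struct object_meta *
blame_neighbour(uintptr_t fault, const struct object_meta *below,
		const struct object_meta *above)
{
	const struct object_meta *to_report = NULL;
	uintptr_t distance = 0;

	if (below && below->allocated) {
		to_report = below;
		distance = fault - (below->addr + below->size);		/* line 1181 */
	}
	if (above && above->allocated) {
		if (!to_report || distance > above->addr - fault)	/* line 1187 */
			to_report = above;
	}
	return to_report;
}

int main(void)
{
	/* Object below ends at 0x2000; object above starts at 0x3000. */
	struct object_meta below = { .allocated = 1, .addr = 0x1f00, .size = 0x100 };
	struct object_meta above = { .allocated = 1, .addr = 0x3000, .size = 0x80 };

	/* A fault at 0x2004 is only 4 bytes past 'below', so blame it. */
	return blame_neighbour(0x2004, &below, &above) == &below ? 0 : 1;
}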