/* SPDX-License-Identifier: GPL-2.0 */
/*
 * allocation tagging
 */
#ifndef _LINUX_ALLOC_TAG_H
#define _LINUX_ALLOC_TAG_H

#include <linux/bug.h>
#include <linux/codetag.h>
#include <linux/container_of.h>
#include <linux/preempt.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/static_key.h>
#include <linux/irqflags.h>

struct alloc_tag_counters {
	u64 bytes;
	u64 calls;
};

/*
 * An instance of this structure is created in a special ELF section at every
 * allocation callsite. At runtime, the special section is treated as an array
 * of these. The embedded codetag ties each tag into the codetag framework.
 */
struct alloc_tag {
	struct codetag ct;
	struct alloc_tag_counters __percpu *counters;
} __aligned(8);

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG

#define CODETAG_EMPTY ((void *)1)

static inline bool is_codetag_empty(union codetag_ref *ref)
{
	return ref->ct == CODETAG_EMPTY;
}

static inline void set_codetag_empty(union codetag_ref *ref)
{
	if (ref)
		ref->ct = CODETAG_EMPTY;
}

#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
static inline void set_codetag_empty(union codetag_ref *ref) {}

#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

#ifdef CONFIG_MEM_ALLOC_PROFILING

struct codetag_bytes {
	struct codetag *ct;
	s64 bytes;
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep);

static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
{
	return container_of(ct, struct alloc_tag, ct);
}

#ifdef ARCH_NEEDS_WEAK_PER_CPU
/*
 * When percpu variables are required to be defined as weak, static percpu
 * variables can't be used inside a function (see comments for
 * DECLARE_PER_CPU_SECTION). Instead we account all module allocations to
 * a single counter.
 */
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);

#define DEFINE_ALLOC_TAG(_alloc_tag)					\
	static struct alloc_tag _alloc_tag __used __aligned(8)		\
	__section("alloc_tags") = {					\
		.ct = CODE_TAG_INIT,					\
		.counters = &_shared_alloc_tag };

#else /* ARCH_NEEDS_WEAK_PER_CPU */

#define DEFINE_ALLOC_TAG(_alloc_tag)					\
	static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
	static struct alloc_tag _alloc_tag __used __aligned(8)		\
	__section("alloc_tags") = {					\
		.ct = CODE_TAG_INIT,					\
		.counters = &_alloc_tag_cntr };

#endif /* ARCH_NEEDS_WEAK_PER_CPU */
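
/*
 * Illustrative sketch (not part of this header): in the common
 * (non-weak-percpu) case, DEFINE_ALLOC_TAG(my_tag) expands to roughly
 *
 *	static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr);
 *	static struct alloc_tag my_tag __used __aligned(8)
 *	__section("alloc_tags") = {
 *		.ct = CODE_TAG_INIT,
 *		.counters = &_alloc_tag_cntr };
 *
 * i.e. one tag object per callsite, placed in the "alloc_tags" ELF section
 * so the codetag framework can walk all tags as an array, plus a per-CPU
 * counter pair private to that callsite. CODE_TAG_INIT records the
 * callsite's location; the filename/lineno used by the WARN_ONCE() below
 * come from there.
 */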

DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			mem_alloc_profiling_key);

static inline bool mem_alloc_profiling_enabled(void)
{
	return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
				   &mem_alloc_profiling_key);
}

static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
{
	struct alloc_tag_counters v = { 0, 0 };
	struct alloc_tag_counters *counter;
	int cpu;

	for_each_possible_cpu(cpu) {
		counter = per_cpu_ptr(tag->counters, cpu);
		v.bytes += counter->bytes;
		v.calls += counter->calls;
	}

	return v;
}

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
{
	WARN_ONCE(ref && ref->ct,
		  "alloc_tag was not cleared (got tag for %s:%u)\n",
		  ref->ct->filename, ref->ct->lineno);

	WARN_ONCE(!tag, "current->alloc_tag not set");
}

static inline void alloc_tag_sub_check(union codetag_ref *ref)
{
	WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
}
#else
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
#endif

/* Caller should verify both ref and tag to be valid */
static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	ref->ct = &tag->ct;
	/*
	 * We need to increment the call counter every time we have a new
	 * allocation or when we split a large allocation into smaller ones.
	 * Each new reference for every sub-allocation needs to increment the
	 * call counter, because the counter is decremented when each part is
	 * freed.
	 */
	this_cpu_inc(tag->counters->calls);
}

static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return;

	__alloc_tag_ref_set(ref, tag);
}

static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return;

	__alloc_tag_ref_set(ref, tag);
	this_cpu_add(tag->counters->bytes, bytes);
}

static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
{
	struct alloc_tag *tag;

	alloc_tag_sub_check(ref);
	if (!ref || !ref->ct)
		return;

	if (is_codetag_empty(ref)) {
		ref->ct = NULL;
		return;
	}

	tag = ct_to_alloc_tag(ref->ct);

	this_cpu_sub(tag->counters->bytes, bytes);
	this_cpu_dec(tag->counters->calls);

	ref->ct = NULL;
}

#define alloc_tag_record(p)	((p) = current->alloc_tag)
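
/*
 * Usage sketch (illustrative; "struct my_obj" and its ref placement are
 * hypothetical): an allocator that keeps a union codetag_ref next to each
 * object charges the tag on allocation and un-charges it on free.
 * alloc_tag_add() stores the tag reference and bumps both the calls and
 * bytes counters; alloc_tag_sub() reverses both, so each add/sub pair must
 * use the same byte count:
 *
 *	struct my_obj {
 *		union codetag_ref ref;
 *		char data[64];
 *	};
 *
 *	// at allocation time, charge the task's current tag:
 *	alloc_tag_add(&obj->ref, current->alloc_tag, sizeof(obj->data));
 *	...
 *	// at free time:
 *	alloc_tag_sub(&obj->ref, sizeof(obj->data));
 */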

#else /* CONFIG_MEM_ALLOC_PROFILING */

#define DEFINE_ALLOC_TAG(_alloc_tag)
static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
				 size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_tag_record(p)	do {} while (0)

#endif /* CONFIG_MEM_ALLOC_PROFILING */

#define alloc_hooks_tag(_tag, _do_alloc)				\
({									\
	struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag);	\
	typeof(_do_alloc) _res = _do_alloc;				\
	alloc_tag_restore(_tag, _old);					\
	_res;								\
})

#define alloc_hooks(_do_alloc)						\
({									\
	DEFINE_ALLOC_TAG(_alloc_tag);					\
	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
})

#endif /* _LINUX_ALLOC_TAG_H */
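
/*
 * Usage sketch (illustrative; foo_alloc_noprof() is a hypothetical
 * allocator): alloc_hooks() is meant to wrap an allocation call at its
 * callsite, e.g.
 *
 *	#define foo_alloc(size)	alloc_hooks(foo_alloc_noprof(size))
 *
 * Each expansion defines one static tag for that callsite, installs it as
 * the running task's current tag via alloc_tag_save() (provided elsewhere,
 * alongside the task struct) for the duration of the call, and restores
 * the previous tag afterwards, so the allocation is accounted to the
 * caller of foo_alloc() rather than to foo_alloc_noprof() itself. With
 * CONFIG_MEM_ALLOC_PROFILING disabled, DEFINE_ALLOC_TAG() expands to
 * nothing and the wrapper reduces to the bare allocation call.
 */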