/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* percpu_counter batch for local add or sub */
#define PERCPU_COUNTER_LOCAL_BATCH	INT_MAX

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
			       gfp_t gfp, u32 nr_counters,
			       struct lock_class_key *key);

#define percpu_counter_init_many(fbc, value, gfp, nr_counters)		\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init_many(fbc, value, gfp, nr_counters,\
					   &__key);			\
	})

#define percpu_counter_init(fbc, value, gfp)				\
	percpu_counter_init_many(fbc, value, gfp, 1)

void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
	percpu_counter_destroy_many(fbc, 1);
}

void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit,
				  s64 amount, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

static inline bool
percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
{
	return __percpu_counter_limited_add(fbc, limit, amount,
					    percpu_counter_batch);
}

/*
 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
 * are accumulated in the local per-cpu counter and are not moved into
 * fbc->count until the local count overflows PERCPU_COUNTER_LOCAL_BATCH.
 * This makes counter writes efficient.
 * But percpu_counter_sum(), instead of percpu_counter_read(), must then
 * be used to add up the counts from each CPU so that the local counts
 * are accounted for.  Hence percpu_counter_add_local() and
 * percpu_counter_sub_local() should be used for counters that are
 * updated frequently and read rarely.
 */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
}
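
/*
 * Illustrative sketch, not part of this header: a write-mostly event
 * counter built on the _local variants.  The names ev_counter,
 * ev_record() and ev_total() are hypothetical, used only to show the
 * intended pairing of percpu_counter_add_local() with
 * percpu_counter_sum():
 *
 *	static struct percpu_counter ev_counter;
 *
 *	static void ev_record(void)
 *	{
 *		percpu_counter_add_local(&ev_counter, 1);
 *	}
 *
 *	static s64 ev_total(void)
 *	{
 *		return percpu_counter_sum(&ev_counter);
 *	}
 *
 * ev_record() is cheap because updates accumulate in this CPU's s32
 * counter; ev_total() must sum because most of the count may still sit
 * in the per-cpu counters rather than in fbc->count.
 */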

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter that should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	/* Prevent reloads of fbc->count */
	s64 ret = READ_ONCE(fbc->count);

	if (ret >= 0)
		return ret;
	return 0;
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init_many(struct percpu_counter *fbc,
					   s64 amount, gfp_t gfp,
					   u32 nr_counters)
{
	u32 i;

	for (i = 0; i < nr_counters; i++)
		fbc[i].count = amount;

	return 0;
}

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	return percpu_counter_init_many(fbc, amount, gfp, 1);
}

static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
					       u32 nr_counters)
{
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	unsigned long flags;

	local_irq_save(flags);
	fbc->count += amount;
	local_irq_restore(flags);
}

static inline bool
percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
{
	unsigned long flags;
	bool good = false;
	s64 count;

	if (amount == 0)
		return true;

	local_irq_save(flags);
	count = fbc->count + amount;
	if ((amount > 0 && count <= limit) ||
	    (amount < 0 && count >= limit)) {
		fbc->count = count;
		good = true;
	}
	local_irq_restore(flags);
	return good;
}
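
/*
 * Illustrative sketch, not part of this header: using
 * percpu_counter_limited_add() to enforce an upper bound.  The names
 * nr_used and MAX_USED are hypothetical:
 *
 *	if (!percpu_counter_limited_add(&nr_used, MAX_USED, 1))
 *		return -ENOSPC;
 *
 * A positive amount is applied only if the result stays at or below
 * the limit; a negative amount only if it stays at or above it.  On
 * failure the counter is left unchanged, so no undo is needed.
 */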

/* The non-SMP percpu_counter_add_local() is the same as percpu_counter_add() */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, amount);
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers.  In the UP case
 * the number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}

static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

static inline void
percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_local(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */
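
/*
 * Illustrative sketch, not part of this header: the usual lifecycle of
 * a percpu_counter.  The names nr_items, watermark and reclaim() are
 * hypothetical:
 *
 *	err = percpu_counter_init(&nr_items, 0, GFP_KERNEL);
 *	if (err)
 *		return err;
 *
 *	percpu_counter_inc(&nr_items);
 *	if (percpu_counter_compare(&nr_items, watermark) > 0)
 *		reclaim();
 *	percpu_counter_dec(&nr_items);
 *
 *	percpu_counter_destroy(&nr_items);
 *
 * percpu_counter_compare() is used rather than reading fbc->count
 * directly because, on SMP, the read value may deviate from the true
 * count by up to batch * num_online_cpus().
 */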