#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/spinlock.h> /* For preempt_disable() */
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/string.h> /* For memset() */
#include <linux/cpumask.h>

#include <asm/percpu.h>

/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
#ifndef PERCPU_ENOUGH_ROOM
#ifdef CONFIG_MODULES
/* Extra static per-cpu space reserved for modules' DEFINE_PER_CPU vars. */
#define PERCPU_MODULE_RESERVE	8192
#else
#define PERCPU_MODULE_RESERVE	0
#endif

/*
 * Total static per-cpu area size: the span of the .data.percpu section
 * (__per_cpu_start..__per_cpu_end, linker-provided symbols) plus the
 * module reserve above.  Arch code may pre-define its own value.
 */
#define PERCPU_ENOUGH_ROOM \
	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
#endif	/* PERCPU_ENOUGH_ROOM */

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 *
 * get_cpu_var() disables preemption before taking the address of the
 * current CPU's copy, so the caller stays on that CPU until the matching
 * put_cpu_var() re-enables preemption.  The dummy extern declaration of
 * simple_identifier_##var() only exists to reject non-identifier
 * arguments at compile time; it is never called.
 */
#define get_cpu_var(var) (*({				\
	extern int simple_identifier_##var(void);	\
	preempt_disable();				\
	&__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()

#ifdef CONFIG_SMP

/* Per-cpu pointer table backing a dynamically allocated per-cpu object. */
struct percpu_data {
	void *ptrs[NR_CPUS];
};

/*
 * The handle returned to users of the dynamic per-cpu allocator is the
 * bitwise complement of the real struct percpu_data pointer; presumably
 * this is so that accidentally dereferencing the opaque handle directly
 * faults instead of silently reading the pointer table — TODO confirm.
 * __percpu_disguise() converts in either direction (~~x == x).
 */
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
/*
 * Use this to get to a cpu's version of the per-cpu object dynamically
 * allocated. Non-atomic access to the current CPU's version should
 * probably be combined with get_cpu()/put_cpu().
 */
#define percpu_ptr(ptr, cpu)                              \
({                                                        \
        struct percpu_data *__p = __percpu_disguise(ptr); \
        (__typeof__(ptr))__p->ptrs[(cpu)];	          \
})

/* Allocate (if needed) and return @cpu's copy behind handle @__pdata. */
extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu);
/* Free @cpu's copy behind handle @__pdata. */
extern void percpu_depopulate(void *__pdata, int cpu);
/* Populate a copy for every CPU set in @mask; returns 0 or -ENOMEM. */
extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
				  cpumask_t *mask);
/* Free the copies for every CPU set in @mask. */
extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask);
/* Allocate a per-cpu object with copies for the CPUs in @mask. */
extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
/* Free all copies and the handle itself. */
extern void percpu_free(void *__pdata);

#else /* CONFIG_SMP */

/*
 * Uniprocessor: there is exactly one copy, so the "handle" is the object
 * itself.  @cpu is evaluated (for side effects) but otherwise ignored.
 */
#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

/* UP stub: nothing to depopulate; the single copy lives until percpu_free(). */
static inline void percpu_depopulate(void *__pdata, int cpu)
{
}

/* UP stub: see percpu_depopulate(). */
static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
{
}

/* UP: the lone copy was allocated up front; just hand it back. */
static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp,
				    int cpu)
{
	return percpu_ptr(__pdata, cpu);
}

/* UP stub: always succeeds — the single copy already exists. */
static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
					 cpumask_t *mask)
{
	return 0;
}

/* UP: the per-cpu "object" is a single zeroed allocation; @mask is unused. */
static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
{
	return kzalloc(size, gfp);
}

/* UP: free the single copy allocated by __percpu_alloc_mask(). */
static inline void percpu_free(void *__pdata)
{
	kfree(__pdata);
}

#endif /* CONFIG_SMP */

/* Convenience wrappers taking a cpumask_t by value rather than by pointer. */
#define percpu_populate_mask(__pdata, size, gfp, mask) \
	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
#define percpu_depopulate_mask(__pdata, mask) \
	__percpu_depopulate_mask((__pdata), &(mask))
#define percpu_alloc_mask(size, gfp, mask) \
	__percpu_alloc_mask((size), (gfp), &(mask))

/* Default allocation covers the CPUs that are online right now. */
#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)

/* (legacy) interface for use without CPU hotplug handling */

/* Covers every possible CPU, so hotplugged CPUs need no populate call. */
#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
						cpu_possible_map)
#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))
#define free_percpu(ptr) percpu_free((ptr))
#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))

#endif /* __LINUX_PERCPU_H */