#ifndef _LINUX_SLAB_DEF_H
#define	_LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * when kmalloc is called with a size that can be established at
 * compile time.
 */

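/*
 * Example (illustrative): a constant-size call such as
 * kmalloc(16, GFP_KERNEL) is resolved at compile time to a
 * kmem_cache_alloc() on the smallest general cache that fits
 * (the size-32 cache on typical configurations; the available
 * sizes come from <linux/kmalloc_sizes.h>).
 */
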
#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

#include <trace/events/kmem.h>

/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except
 * when DEBUG and FORCED_DEBUG are enabled, in which case they are
 * BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a
 * guaranteed alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

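/*
 * For example (an illustration, not exhaustive): architectures with
 * non-coherent DMA, such as ARM, define ARCH_DMA_MINALIGN to the cache
 * line size so that DMA into a kmalloc'd buffer never shares a cache
 * line with unrelated data; everywhere else the minimum is just the
 * alignment of a 64-bit integer.
 */
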
#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible, do not define ARCH_SLAB_MINALIGN for CONFIG_DEBUG_SLAB
 * builds, as it disables some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

/*
 * struct kmem_cache
 *
 * Manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the
	 * total object size including these internal fields; the following
	 * two variables contain the offset to the user object and its size.
	 */
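	/*
	 * A rough sketch of the debug layout (the exact fields depend on
	 * which debug options are selected):
	 *
	 *   [redzone][user object][redzone][last-user address]
	 *
	 * obj_offset locates the user object within that layout, and
	 * obj_size is the size of the user-visible object alone.
	 */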
	int obj_offset;
	int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to
	 * size this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init()).
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[].
	 */
};

/* Size description struct for general caches. */
struct cache_sizes {
	size_t		 	cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
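
/*
 * malloc_sizes[] is indexed in the same order as the CACHE(x) entries in
 * <linux/kmalloc_sizes.h>, one slot per general cache size (e.g. 32, 64,
 * 96, 128, ... bytes on common configurations); this is what lets
 * kmalloc() below turn a compile-time constant size into an array index.
 */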

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

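		/*
		 * Including <linux/kmalloc_sizes.h> expands each CACHE(x)
		 * entry into one of the size checks below; since size is a
		 * compile-time constant, the compiler folds the whole
		 * cascade into a single value of i.
		 */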
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_notrace(cachep, flags);

		trace_kmalloc(_THIS_IP_, ret,
			      size, slab_buffer_size(cachep), flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
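
/*
 * Usage sketch: with a compile-time constant size the call above compiles
 * down to kmem_cache_alloc() on the matching general cache; with a runtime
 * size it falls through to __kmalloc():
 *
 *	buf = kmalloc(sizeof(struct foo), GFP_KERNEL);	constant size
 *	buf = kmalloc(len, GFP_KERNEL);			runtime size
 *
 * ('struct foo' and 'len' are illustrative names, not part of this API.)
 */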

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
					   gfp_t flags,
					   int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
			      gfp_t flags,
			      int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, slab_buffer_size(cachep),
				   flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
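
/*
 * Usage sketch: kmalloc_node() behaves like kmalloc() but allocates from
 * the given NUMA node ('nid' below is an illustrative variable):
 *
 *	buf = kmalloc_node(sizeof(struct foo), GFP_KERNEL, nid);
 */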

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */