/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <linux/security.h>

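/* One cached (flow key -> resolved object) mapping.  Entries are chained
 * off a per-CPU hash table; "genid" records the value of flow_cache_genid
 * at resolution time so stale entries can be detected lazily.
 */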
struct flow_cache_entry {
	struct flow_cache_entry	*next;
	u16			family;
	u8			dir;
	struct flowi		key;
	u32			genid;
	void			*object;
	atomic_t		*object_ref;
};

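/* Global generation counter.  Users of the cache bump this to invalidate
 * every cached entry at once: lookups compare an entry's genid against the
 * current value and re-resolve on mismatch.
 */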
atomic_t flow_cache_genid = ATOMIC_INIT(0);

static u32 flow_hash_shift;
#define flow_hash_size	(1 << flow_hash_shift)
static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };

#define flow_table(cpu) (per_cpu(flow_tables, cpu))

static struct kmem_cache *flow_cachep __read_mostly;

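/* Per-CPU entry count limits: once a CPU holds more than flow_hwm entries,
 * each of its hash chains is trimmed back towards the flow_lwm total.
 */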
static int flow_lwm, flow_hwm;

struct flow_percpu_info {
	int hash_rnd_recalc;
	u32 hash_rnd;
	int count;
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };

#define flow_hash_rnd_recalc(cpu) \
	(per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
#define flow_hash_rnd(cpu) \
	(per_cpu(flow_hash_info, cpu).hash_rnd)
#define flow_count(cpu) \
	(per_cpu(flow_hash_info, cpu).count)

static struct timer_list flow_hash_rnd_timer;

#define FLOW_HASH_RND_PERIOD	(10 * 60 * HZ)

struct flow_flush_info {
	atomic_t cpuleft;
	struct completion completion;
};
static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };

#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))

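/* Periodic timer: ask every CPU to pick a fresh hash secret the next time
 * it performs a lookup, then re-arm the timer.
 */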
static void flow_cache_new_hashrnd(unsigned long arg)
{
	int i;

	for_each_possible_cpu(i)
		flow_hash_rnd_recalc(i) = 1;

	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&flow_hash_rnd_timer);
}

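/* Free one cache entry, dropping its reference on the resolved object. */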
static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
{
	if (fle->object)
		atomic_dec(fle->object_ref);
	kmem_cache_free(flow_cachep, fle);
	flow_count(cpu)--;
}

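/* Trim every hash chain on @cpu down to at most @shrink_to entries,
 * freeing the excess.
 */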
static void __flow_cache_shrink(int cpu, int shrink_to)
{
	struct flow_cache_entry *fle, **flp;
	int i;

	for (i = 0; i < flow_hash_size; i++) {
		int k = 0;

		flp = &flow_table(cpu)[i];
		while ((fle = *flp) != NULL && k < shrink_to) {
			k++;
			flp = &fle->next;
		}
		while ((fle = *flp) != NULL) {
			*flp = fle->next;
			flow_entry_kill(cpu, fle);
		}
	}
}

static void flow_cache_shrink(int cpu)
{
	int shrink_to = flow_lwm / flow_hash_size;

	__flow_cache_shrink(cpu, shrink_to);
}

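/* Pick a new per-CPU hash secret and drop all existing entries, since
 * their hash buckets are no longer valid.
 */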
static void flow_new_hash_rnd(int cpu)
{
	get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
	flow_hash_rnd_recalc(cpu) = 0;

	__flow_cache_shrink(cpu, 0);
}

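/* Hash a flow key into a bucket index using this CPU's hash secret. */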
static u32 flow_hash_code(struct flowi *key, int cpu)
{
	u32 *k = (u32 *) key;

	return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
		(flow_hash_size - 1));
}

#if (BITS_PER_LONG == 64)
typedef u64 flow_compare_t;
#else
typedef u32 flow_compare_t;
#endif

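/* Deliberately never defined: if struct flowi is not a whole number of
 * flow_compare_t words, the call below survives optimization and the
 * build fails at link time.
 */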
extern void flowi_is_missized(void);

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(struct flowi *key1, struct flowi *key2)
{
	flow_compare_t *k1, *k1_lim, *k2;
	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

	if (sizeof(struct flowi) % sizeof(flow_compare_t))
		flowi_is_missized();

	k1 = (flow_compare_t *) key1;
	k1_lim = k1 + n_elem;

	k2 = (flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}

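/* Main lookup entry point.  Returns the cached object for (key, family,
 * dir) if it is still current, otherwise calls the resolver, caches the
 * result and returns it (or an ERR_PTR on failure).  Runs with BHs
 * disabled so the per-CPU table needs no further locking.
 */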
void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
			flow_resolve_t resolver)
{
	struct flow_cache_entry *fle, **head;
	unsigned int hash;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();

	fle = NULL;
	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!flow_table(cpu))
		goto nocache;

	if (flow_hash_rnd_recalc(cpu))
		flow_new_hash_rnd(cpu);
	hash = flow_hash_code(key, cpu);

	head = &flow_table(cpu)[hash];
	for (fle = *head; fle; fle = fle->next) {
		if (fle->family == family &&
		    fle->dir == dir &&
		    flow_key_compare(key, &fle->key) == 0) {
			if (fle->genid == atomic_read(&flow_cache_genid)) {
				void *ret = fle->object;

				if (ret)
					atomic_inc(fle->object_ref);
				local_bh_enable();

				return ret;
			}
			break;
		}
	}

	if (!fle) {
		if (flow_count(cpu) > flow_hwm)
			flow_cache_shrink(cpu);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->next = *head;
			*head = fle;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, sizeof(*key));
			fle->object = NULL;
			flow_count(cpu)++;
		}
	}

nocache:
	{
		int err;
		void *obj;
		atomic_t *obj_ref;

		err = resolver(key, family, dir, &obj, &obj_ref);

		if (fle && !err) {
			fle->genid = atomic_read(&flow_cache_genid);

			if (fle->object)
				atomic_dec(fle->object_ref);

			fle->object = obj;
			fle->object_ref = obj_ref;
			if (obj)
				atomic_inc(fle->object_ref);
		}
		local_bh_enable();

		if (err)
			obj = ERR_PTR(err);
		return obj;
	}
}

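/* Per-CPU flush work: walk this CPU's table and drop the object from any
 * entry whose genid is stale, then signal completion if we are the last
 * CPU to finish.
 */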
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	int i;
	int cpu;

	cpu = smp_processor_id();
	for (i = 0; i < flow_hash_size; i++) {
		struct flow_cache_entry *fle;

		fle = flow_table(cpu)[i];
		for (; fle; fle = fle->next) {
			unsigned genid = atomic_read(&flow_cache_genid);

			if (!fle->object || fle->genid == genid)
				continue;

			fle->object = NULL;
			atomic_dec(fle->object_ref);
		}
	}

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

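/* Runs on each remote CPU via smp_call_function(): hand the flush request
 * over to that CPU's flush tasklet.
 */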
static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	int cpu;
	struct tasklet_struct *tasklet;

	cpu = smp_processor_id();

	tasklet = flow_flush_tasklet(cpu);
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

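/* Flush stale objects on all online CPUs and wait until every CPU has
 * finished.  Serialized by a local mutex, with CPU hotplug held off while
 * the per-CPU work is outstanding.
 */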
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	lock_cpu_hotplug();
	mutex_lock(&flow_flush_sem);
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	unlock_cpu_hotplug();
}

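/* Allocate and initialise the hash table, hash secret state and flush
 * tasklet for one CPU.
 */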
static void __devinit flow_cache_cpu_prepare(int cpu)
{
	struct tasklet_struct *tasklet;
	unsigned long order;

	for (order = 0;
	     (PAGE_SIZE << order) <
		     (sizeof(struct flow_cache_entry *)*flow_hash_size);
	     order++)
		/* NOTHING */;

	flow_table(cpu) = (struct flow_cache_entry **)
		__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (!flow_table(cpu))
		panic("NET: failed to allocate flow cache order %lu\n", order);

	flow_hash_rnd_recalc(cpu) = 1;
	flow_count(cpu) = 0;

	tasklet = flow_flush_tasklet(cpu);
	tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
}

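/* CPU hotplug callback: empty the table of a CPU that has gone down. */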
static int flow_cache_cpu(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		__flow_cache_shrink((unsigned long)hcpu, 0);
	return NOTIFY_OK;
}

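/* Boot-time setup: create the entry slab, size the tables (1024 buckets
 * per CPU), start the rekeying timer and prepare every possible CPU.
 */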
static int __init flow_cache_init(void)
{
	int i;

	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					NULL, NULL);
	flow_hash_shift = 10;
	flow_lwm = 2 * flow_hash_size;
	flow_hwm = 4 * flow_hash_size;

	init_timer(&flow_hash_rnd_timer);
	flow_hash_rnd_timer.function = flow_cache_new_hashrnd;
	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&flow_hash_rnd_timer);

	for_each_possible_cpu(i)
		flow_cache_cpu_prepare(i);

	hotcpu_notifier(flow_cache_cpu, 0);
	return 0;
}

module_init(flow_cache_init);

EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);