/*
 * ip_vs_app.c: Application module support for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
 * is that ip_vs_app module handles the reverse direction (incoming requests
 * and outgoing responses).
 *
 *		IP_MASQ_APP application masquerading module
 *
 * Author:	Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
 *
 */

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <asm/system.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <net/ip_vs.h>

EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);

/* ipvs application list head */
static LIST_HEAD(ip_vs_app_list);
static DEFINE_MUTEX(__ip_vs_app_mutex);


/*
 *	Get an ip_vs_app object
 */
static inline int ip_vs_app_get(struct ip_vs_app *app)
{
	return try_module_get(app->module);
}


static inline void ip_vs_app_put(struct ip_vs_app *app)
{
	module_put(app->module);
}


/*
 *	Allocate/initialize app incarnation and register it in proto apps.
 */
static int
ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
{
	struct ip_vs_protocol *pp;
	struct ip_vs_app *inc;
	int ret;

	if (!(pp = ip_vs_proto_get(proto)))
		return -EPROTONOSUPPORT;

	if (!pp->unregister_app)
		return -EOPNOTSUPP;

	inc = kmemdup(app, sizeof(*inc), GFP_KERNEL);
	if (!inc)
		return -ENOMEM;
	INIT_LIST_HEAD(&inc->p_list);
	INIT_LIST_HEAD(&inc->incs_list);
	inc->app = app;
	inc->port = htons(port);
	atomic_set(&inc->usecnt, 0);

	if (app->timeouts) {
		inc->timeout_table =
			ip_vs_create_timeout_table(app->timeouts,
						   app->timeouts_size);
		if (!inc->timeout_table) {
			ret = -ENOMEM;
			goto out;
		}
	}

	ret = pp->register_app(inc);
	if (ret)
		goto out;

	list_add(&inc->a_list, &app->incs_list);
	IP_VS_DBG(9, "%s application %s:%u registered\n",
		  pp->name, inc->name, inc->port);

	return 0;

  out:
	kfree(inc->timeout_table);
	kfree(inc);
	return ret;
}


/*
 *	Release app incarnation
 */
static void
ip_vs_app_inc_release(struct ip_vs_app *inc)
{
	struct ip_vs_protocol *pp;

	if (!(pp = ip_vs_proto_get(inc->protocol)))
		return;

	if (pp->unregister_app)
		pp->unregister_app(inc);

	IP_VS_DBG(9, "%s App %s:%u unregistered\n",
		  pp->name, inc->name, inc->port);

	list_del(&inc->a_list);

	kfree(inc->timeout_table);
	kfree(inc);
}


/*
 *	Get reference to app inc (only called from softirq)
 *
 */
int ip_vs_app_inc_get(struct ip_vs_app *inc)
{
	int result;

	atomic_inc(&inc->usecnt);
	if (unlikely((result = ip_vs_app_get(inc->app)) != 1))
		atomic_dec(&inc->usecnt);
	return result;
}


/*
 *	Put the app inc (only called from timer or net softirq)
 */
void ip_vs_app_inc_put(struct ip_vs_app *inc)
{
	ip_vs_app_put(inc->app);
	atomic_dec(&inc->usecnt);
}
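
/*
 * Reference counting note: ip_vs_app_inc_get() takes one usecnt
 * reference on the incarnation plus a module reference on the owning
 * app, and ip_vs_app_inc_put() drops both.  The get is normally taken
 * when a connection is bound to the incarnation (via the protocol's
 * app_conn_bind hook), and released in ip_vs_unbind_app() below.
 */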


/*
 *	Register an application incarnation in protocol applications
 */
int
register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
{
	int result;

	mutex_lock(&__ip_vs_app_mutex);

	result = ip_vs_app_inc_new(app, proto, port);

	mutex_unlock(&__ip_vs_app_mutex);

	return result;
}


/*
 *	ip_vs_app registration routine
 */
int register_ip_vs_app(struct ip_vs_app *app)
{
	/* increase the module use count */
	ip_vs_use_count_inc();

	mutex_lock(&__ip_vs_app_mutex);

	list_add(&app->a_list, &ip_vs_app_list);

	mutex_unlock(&__ip_vs_app_mutex);

	return 0;
}
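
/*
 * Usage sketch (illustrative, not part of this file): an application
 * helper module fills in a static struct ip_vs_app and registers the
 * app itself plus one incarnation per service port.  The callback
 * names below are hypothetical placeholders for the helper's own
 * payload rewriters.
 *
 *	static struct ip_vs_app my_helper = {
 *		.name      = "ftp",
 *		.protocol  = IPPROTO_TCP,
 *		.module    = THIS_MODULE,
 *		.incs_list = LIST_HEAD_INIT(my_helper.incs_list),
 *		.pkt_out   = my_helper_out,
 *		.pkt_in    = my_helper_in,
 *	};
 *
 *	ret = register_ip_vs_app(&my_helper);
 *	if (!ret)
 *		ret = register_ip_vs_app_inc(&my_helper, IPPROTO_TCP, 21);
 */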


/*
 *	ip_vs_app unregistration routine
 *	We are sure there are no app incarnations attached to services
 */
void unregister_ip_vs_app(struct ip_vs_app *app)
{
	struct ip_vs_app *inc, *nxt;

	mutex_lock(&__ip_vs_app_mutex);

	list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
		ip_vs_app_inc_release(inc);
	}

	list_del(&app->a_list);

	mutex_unlock(&__ip_vs_app_mutex);

	/* decrease the module use count */
	ip_vs_use_count_dec();
}


/*
 *	Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
 */
int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
{
	return pp->app_conn_bind(cp);
}


/*
 *	Unbind cp from application incarnation (called by cp destructor)
 */
void ip_vs_unbind_app(struct ip_vs_conn *cp)
{
	struct ip_vs_app *inc = cp->app;

	if (!inc)
		return;

	if (inc->unbind_conn)
		inc->unbind_conn(inc, cp);
	if (inc->done_conn)
		inc->done_conn(inc, cp);
	ip_vs_app_inc_put(inc);
	cp->app = NULL;
}


/*
 *	Fixes th->seq based on ip_vs_seq info.
 */
static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
	__u32 seq = ntohl(th->seq);

	/*
	 *	Adjust seq with the delta offset for all packets after
	 *	the most recently resized packet's seq, and with the
	 *	previous_delta offset for all packets before it.
	 */
	if (vseq->delta || vseq->previous_delta) {
		if(after(seq, vseq->init_seq)) {
			th->seq = htonl(seq + vseq->delta);
			IP_VS_DBG(9, "%s(): added delta (%d) to seq\n",
				  __func__, vseq->delta);
		} else {
			th->seq = htonl(seq + vseq->previous_delta);
			IP_VS_DBG(9, "%s(): added previous_delta (%d) to seq\n",
				  __func__, vseq->previous_delta);
		}
	}
}


/*
 *	Fixes th->ack_seq based on ip_vs_seq info.
 */
static inline void
vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
	__u32 ack_seq = ntohl(th->ack_seq);

	/*
	 * Adjust ack_seq with the delta offset for packets that
	 * acknowledge data AFTER the most recently resized packet;
	 * for packets acknowledging earlier data, use previous_delta.
	 */
	if (vseq->delta || vseq->previous_delta) {
		/* ack_seq is the sequence number of the next octet the
		   sender expects to receive, so compare it against
		   init_seq+delta */
		if(after(ack_seq, vseq->init_seq+vseq->delta)) {
			th->ack_seq = htonl(ack_seq - vseq->delta);
			IP_VS_DBG(9, "%s(): subtracted delta "
				  "(%d) from ack_seq\n", __func__, vseq->delta);

		} else {
			th->ack_seq = htonl(ack_seq - vseq->previous_delta);
			IP_VS_DBG(9, "%s(): subtracted "
				  "previous_delta (%d) from ack_seq\n",
				  __func__, vseq->previous_delta);
		}
	}
}


/*
 *	Updates ip_vs_seq if pkt has been resized
 *	Assumes already checked proto==IPPROTO_TCP and diff!=0.
 */
static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
				 unsigned flag, __u32 seq, int diff)
{
	/* spinlock is to keep updating cp->flags atomic */
	spin_lock(&cp->lock);
	if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
		vseq->previous_delta = vseq->delta;
		vseq->delta += diff;
		vseq->init_seq = seq;
		cp->flags |= flag;
	}
	spin_unlock(&cp->lock);
}
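
/*
 * Worked example of the seq machinery above (numbers are illustrative):
 * an application helper rewrites an outgoing payload whose TCP seq is
 * 1000 and grows it by 6 bytes (diff = +6).  vs_seq_update() records
 * init_seq = 1000, delta = +6.  From then on vs_fix_seq() adds 6 to the
 * seq of every later outgoing packet (seq after 1000), and
 * vs_fix_ack_seq() subtracts 6 from the ack_seq of replies that
 * acknowledge data past that point, so both ends keep seeing a
 * consistent byte stream.  previous_delta preserves the older offset
 * for packets that still precede the most recent resize.
 */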

static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
				  struct ip_vs_app *app)
{
	int diff;
	const unsigned int tcp_offset = ip_hdrlen(skb);
	struct tcphdr *th;
	__u32 seq;

	if (!skb_make_writable(skb, tcp_offset + sizeof(*th)))
		return 0;

	th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);

	/*
	 *	Remember seq number in case this pkt gets resized
	 */
	seq = ntohl(th->seq);

	/*
	 *	Fix seq stuff if flagged as so.
	 */
	if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
		vs_fix_seq(&cp->out_seq, th);
	if (cp->flags & IP_VS_CONN_F_IN_SEQ)
		vs_fix_ack_seq(&cp->in_seq, th);

	/*
	 *	Call private output hook function
	 */
	if (app->pkt_out == NULL)
		return 1;

	if (!app->pkt_out(app, cp, skb, &diff))
		return 0;

	/*
	 *	Update ip_vs seq stuff if len has changed.
	 */
	if (diff != 0)
		vs_seq_update(cp, &cp->out_seq,
			      IP_VS_CONN_F_OUT_SEQ, seq, diff);

	return 1;
}

/*
 *	Output pkt hook. Will call bound ip_vs_app specific function
 *	called by ipvs packet handler, assumes previously checked cp!=NULL
 *	returns false if it can't handle packet (oom)
 */
int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb)
{
	struct ip_vs_app *app;

	/*
	 *	check if application module is bound to
	 *	this ip_vs_conn.
	 */
	if ((app = cp->app) == NULL)
		return 1;

	/* TCP is complicated */
	if (cp->protocol == IPPROTO_TCP)
		return app_tcp_pkt_out(cp, skb, app);

	/*
	 *	Call private output hook function
	 */
	if (app->pkt_out == NULL)
		return 1;

	return app->pkt_out(app, cp, skb, NULL);
}


static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
				 struct ip_vs_app *app)
{
	int diff;
	const unsigned int tcp_offset = ip_hdrlen(skb);
	struct tcphdr *th;
	__u32 seq;

	if (!skb_make_writable(skb, tcp_offset + sizeof(*th)))
		return 0;

	th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);

	/*
	 *	Remember seq number in case this pkt gets resized
	 */
	seq = ntohl(th->seq);

	/*
	 *	Fix seq stuff if flagged as so.
	 */
	if (cp->flags & IP_VS_CONN_F_IN_SEQ)
		vs_fix_seq(&cp->in_seq, th);
	if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
		vs_fix_ack_seq(&cp->out_seq, th);

	/*
	 *	Call private input hook function
	 */
	if (app->pkt_in == NULL)
		return 1;

	if (!app->pkt_in(app, cp, skb, &diff))
		return 0;

	/*
	 *	Update ip_vs seq stuff if len has changed.
	 */
	if (diff != 0)
		vs_seq_update(cp, &cp->in_seq,
			      IP_VS_CONN_F_IN_SEQ, seq, diff);

	return 1;
}

/*
 *	Input pkt hook. Will call bound ip_vs_app specific function
 *	called by ipvs packet handler, assumes previously checked cp!=NULL.
 *	returns false if can't handle packet (oom).
 */
int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb)
{
	struct ip_vs_app *app;

	/*
	 *	check if application module is bound to
	 *	this ip_vs_conn.
	 */
	if ((app = cp->app) == NULL)
		return 1;

	/* TCP is complicated */
	if (cp->protocol == IPPROTO_TCP)
		return app_tcp_pkt_in(cp, skb, app);

	/*
	 *	Call private input hook function
	 */
	if (app->pkt_in == NULL)
		return 1;

	return app->pkt_in(app, cp, skb, NULL);
}


#ifdef CONFIG_PROC_FS
/*
 *	/proc/net/ip_vs_app entry function
 */

static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
{
	struct ip_vs_app *app, *inc;

	list_for_each_entry(app, &ip_vs_app_list, a_list) {
		list_for_each_entry(inc, &app->incs_list, a_list) {
			if (pos-- == 0)
				return inc;
		}
	}
	return NULL;

}

static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
{
	mutex_lock(&__ip_vs_app_mutex);

	return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_vs_app *inc, *app;
	struct list_head *e;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip_vs_app_idx(0);

	inc = v;
	app = inc->app;

	if ((e = inc->a_list.next) != &app->incs_list)
		return list_entry(e, struct ip_vs_app, a_list);

	/* go on to next application */
	for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
		app = list_entry(e, struct ip_vs_app, a_list);
		list_for_each_entry(inc, &app->incs_list, a_list) {
			return inc;
		}
	}
	return NULL;
}

static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
	mutex_unlock(&__ip_vs_app_mutex);
}

static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "prot port    usecnt name\n");
	else {
		const struct ip_vs_app *inc = v;

		seq_printf(seq, "%-3s  %-7u %-6d %-17s\n",
			   ip_vs_proto_name(inc->protocol),
			   ntohs(inc->port),
			   atomic_read(&inc->usecnt),
			   inc->name);
	}
	return 0;
}
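
/*
 * Illustrative /proc/net/ip_vs_app output, assuming a single ftp helper
 * incarnation on port 21 is registered (actual entries depend on the
 * helpers loaded):
 *
 *   prot port    usecnt name
 *   TCP  21      0      ftp
 */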

static const struct seq_operations ip_vs_app_seq_ops = {
	.start = ip_vs_app_seq_start,
	.next  = ip_vs_app_seq_next,
	.stop  = ip_vs_app_seq_stop,
	.show  = ip_vs_app_seq_show,
};

static int ip_vs_app_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ip_vs_app_seq_ops);
}

static const struct file_operations ip_vs_app_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip_vs_app_open,
	.read	 = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
#endif

int __init ip_vs_app_init(void)
{
	/* we will replace it with proc_net_ipvs_create() soon */
	proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
	return 0;
}


void ip_vs_app_cleanup(void)
{
	proc_net_remove(&init_net, "ip_vs_app");
}